[openwrt.git] / toolchain / gcc / patches / 4.1.2 / 500-avr32.patch
1 diff -Nur gcc-4.1.2/config.sub gcc-4.1.2-owrt/config.sub
2 --- gcc-4.1.2/config.sub 2005-12-16 13:57:40.000000000 +0100
3 +++ gcc-4.1.2-owrt/config.sub 2007-05-24 12:03:28.000000000 +0200
4 @@ -239,7 +239,7 @@
5 | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
6 | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
7 | am33_2.0 \
8 - | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr \
9 + | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
10 | bfin \
11 | c4x | clipper \
12 | d10v | d30v | dlx | dsp16xx \
13 @@ -316,7 +316,7 @@
14 | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
15 | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
16 | arm-* | armbe-* | armle-* | armeb-* | armv*-* \
17 - | avr-* \
18 + | avr-* | avr32-* \
19 | bfin-* | bs2000-* \
20 | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \
21 | clipper-* | craynv-* | cydra-* \
22 diff -Nur gcc-4.1.2/configure.in gcc-4.1.2-owrt/configure.in
23 --- gcc-4.1.2/configure.in 2006-11-21 18:48:36.000000000 +0100
24 +++ gcc-4.1.2-owrt/configure.in 2007-05-24 12:03:28.000000000 +0200
25 @@ -497,6 +497,9 @@
26 arm-*-riscix*)
27 noconfigdirs="$noconfigdirs ld target-libgloss ${libgcj}"
28 ;;
29 + avr32-*-*)
30 + noconfigdirs="$noconfigdirs target-libiberty target-libmudflap target-libffi ${libgcj}"
31 + ;;
32 avr-*-*)
33 noconfigdirs="$noconfigdirs target-libiberty target-libstdc++-v3 ${libgcj}"
34 ;;
35 diff -Nur gcc-4.1.2/gcc/builtins.c gcc-4.1.2-owrt/gcc/builtins.c
36 --- gcc-4.1.2/gcc/builtins.c 2006-10-06 19:06:52.000000000 +0200
37 +++ gcc-4.1.2-owrt/gcc/builtins.c 2007-05-24 12:03:28.000000000 +0200
38 @@ -9228,7 +9228,7 @@
39
40 do
41 {
42 - code = va_arg (ap, enum tree_code);
43 + code = va_arg (ap, int);
44 switch (code)
45 {
46 case 0:
47 diff -Nur gcc-4.1.2/gcc/calls.c gcc-4.1.2-owrt/gcc/calls.c
48 --- gcc-4.1.2/gcc/calls.c 2007-01-29 18:08:31.000000000 +0100
49 +++ gcc-4.1.2-owrt/gcc/calls.c 2007-05-24 12:03:28.000000000 +0200
50 @@ -3434,7 +3434,7 @@
51 for (; count < nargs; count++)
52 {
53 rtx val = va_arg (p, rtx);
54 - enum machine_mode mode = va_arg (p, enum machine_mode);
55 + enum machine_mode mode = va_arg (p, int);
56
57 /* We cannot convert the arg value to the mode the library wants here;
58 must do it earlier where we know the signedness of the arg. */
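Both hunks above make the same change: va_arg reads the promoted type int instead of the enum type. Variadic arguments undergo the default argument promotions, so an enum whose promoted type is int must be fetched back as int and then converted; some host compilers warn about or reject the enum form. A minimal, hypothetical sketch of the same idiom (not part of the patch, names invented for illustration):

    #include <stdarg.h>
    #include <stdio.h>

    enum tree_code { TC_NONE, TC_PLUS, TC_MINUS };

    /* The enum argument is promoted to int by the caller, so it is read
       back with va_arg (ap, int) and cast, mirroring the hunks above.  */
    static enum tree_code
    first_code (int count, ...)
    {
      va_list ap;
      enum tree_code code;

      va_start (ap, count);
      code = (enum tree_code) va_arg (ap, int);
      va_end (ap);
      return code;
    }

    int
    main (void)
    {
      printf ("%d\n", (int) first_code (1, TC_PLUS));
      return 0;
    }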
59 diff -Nur gcc-4.1.2/gcc/config/avr32/avr32.c gcc-4.1.2-owrt/gcc/config/avr32/avr32.c
60 --- gcc-4.1.2/gcc/config/avr32/avr32.c 1970-01-01 01:00:00.000000000 +0100
61 +++ gcc-4.1.2-owrt/gcc/config/avr32/avr32.c 2007-05-24 12:03:28.000000000 +0200
62 @@ -0,0 +1,7273 @@
63 +/*
64 + Target hooks and helper functions for AVR32.
65 + Copyright 2003-2006 Atmel Corporation.
66 +
67 + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
68 + Initial porting by Anders Ødland.
69 +
70 + This file is part of GCC.
71 +
72 + This program is free software; you can redistribute it and/or modify
73 + it under the terms of the GNU General Public License as published by
74 + the Free Software Foundation; either version 2 of the License, or
75 + (at your option) any later version.
76 +
77 + This program is distributed in the hope that it will be useful,
78 + but WITHOUT ANY WARRANTY; without even the implied warranty of
79 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
80 + GNU General Public License for more details.
81 +
82 + You should have received a copy of the GNU General Public License
83 + along with this program; if not, write to the Free Software
84 + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
85 +
86 +#include "config.h"
87 +#include "system.h"
88 +#include "coretypes.h"
89 +#include "tm.h"
90 +#include "rtl.h"
91 +#include "tree.h"
92 +#include "obstack.h"
93 +#include "regs.h"
94 +#include "hard-reg-set.h"
95 +#include "real.h"
96 +#include "insn-config.h"
97 +#include "conditions.h"
98 +#include "output.h"
99 +#include "insn-attr.h"
100 +#include "flags.h"
101 +#include "reload.h"
102 +#include "function.h"
103 +#include "expr.h"
104 +#include "optabs.h"
105 +#include "toplev.h"
106 +#include "recog.h"
107 +#include "ggc.h"
108 +#include "except.h"
109 +#include "c-pragma.h"
110 +#include "integrate.h"
111 +#include "tm_p.h"
112 +#include "langhooks.h"
113 +
114 +#include "target.h"
115 +#include "target-def.h"
116 +
117 +#include <ctype.h>
118 +
119 +/* Forward definitions of types. */
120 +typedef struct minipool_node Mnode;
121 +typedef struct minipool_fixup Mfix;
122 +
123 +/* Obstack for minipool constant handling. */
124 +static struct obstack minipool_obstack;
125 +static char *minipool_startobj;
126 +static rtx minipool_vector_label;
127 +
128 +/* True if we are currently building a constant table. */
129 +int making_const_table;
130 +
131 +/* Some forward function declarations */
132 +static unsigned long avr32_isr_value (tree);
133 +static unsigned long avr32_compute_func_type (void);
134 +static tree avr32_handle_isr_attribute (tree *, tree, tree, int, bool *);
135 +static tree avr32_handle_acall_attribute (tree *, tree, tree, int, bool *);
136 +static tree avr32_handle_fndecl_attribute (tree * node, tree name, tree args,
137 + int flags, bool * no_add_attrs);
138 +static void avr32_reorg (void);
139 +bool avr32_return_in_msb (tree type);
140 +bool avr32_vector_mode_supported (enum machine_mode mode);
141 +static void avr32_init_libfuncs (void);
142 +void avr32_load_pic_register (void);
143 +
144 +
145 +static void
146 +avr32_add_gc_roots (void)
147 +{
148 + gcc_obstack_init (&minipool_obstack);
149 + minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
150 +}
151 +
152 +
153 +/* List of all known AVR32 parts */
154 +static const struct part_type_s avr32_part_types[] = {
155 + /* name, part_type, architecture type, macro */
156 + {"none", PART_TYPE_AVR32_NONE, ARCH_TYPE_AVR32_AP, "__AVR32__"},
157 + {"ap7000", PART_TYPE_AVR32_AP7000, ARCH_TYPE_AVR32_AP, "__AVR32_AP7000__"},
158 + {"ap7010", PART_TYPE_AVR32_AP7010, ARCH_TYPE_AVR32_AP, "__AVR32_AP7010__"},
159 + {"ap7020", PART_TYPE_AVR32_AP7020, ARCH_TYPE_AVR32_AP, "__AVR32_AP7020__"},
160 + {"uc3a0256", PART_TYPE_AVR32_UC3A0256, ARCH_TYPE_AVR32_UC, "__AVR32_UC3A0256__"},
161 + {"uc3a0512", PART_TYPE_AVR32_UC3A0512, ARCH_TYPE_AVR32_UC, "__AVR32_UC3A0512__"},
162 + {"uc3a1128", PART_TYPE_AVR32_UC3A1128, ARCH_TYPE_AVR32_UC, "__AVR32_UC3A1128__"},
163 + {"uc3a1256", PART_TYPE_AVR32_UC3A1256, ARCH_TYPE_AVR32_UC, "__AVR32_UC3A1256__"},
164 + {"uc3a1512", PART_TYPE_AVR32_UC3A1512, ARCH_TYPE_AVR32_UC, "__AVR32_UC3A1512__"},
165 + {NULL, 0, 0, NULL}
166 +};
167 +
168 +/* List of all known AVR32 architectures */
169 +static const struct arch_type_s avr32_arch_types[] = {
170 + /* name, architecture type, microarchitecture type, feature flags, macro */
171 + {"ap", ARCH_TYPE_AVR32_AP, UARCH_TYPE_AVR32B, FLAG_AVR32_HAS_DSP |
172 + FLAG_AVR32_HAS_SIMD | FLAG_AVR32_HAS_UNALIGNED_WORD |
173 + FLAG_AVR32_HAS_BRANCH_PRED, "__AVR32_AP__"},
174 + {"uc", ARCH_TYPE_AVR32_UC, UARCH_TYPE_AVR32A,
175 + FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW, "__AVR32_UC__"},
176 + {NULL, 0, 0, 0, NULL}
177 +};
178 +
179 +/* Default arch name */
180 +const char *avr32_arch_name = "ap";
181 +const char *avr32_part_name = "none";
182 +
183 +const struct part_type_s *avr32_part;
184 +const struct arch_type_s *avr32_arch;
185 +
186 +
187 +/* Override command line options */
188 +void
189 +avr32_override_options (void)
190 +{
191 + const struct part_type_s *part;
192 + const struct arch_type_s *arch;
193 +
194 + /* Check if part type is set. */
195 + for (part = avr32_part_types; part->name; part++)
196 + if (strcmp (part->name, avr32_part_name) == 0)
197 + break;
198 +
199 + avr32_part = part;
200 +
201 + if (!part->name)
202 + {
203 + fprintf (stderr, "Unknown part `%s' specified\nKnown part names:\n",
204 + avr32_part_name);
205 + for (part = avr32_part_types; part->name; part++)
206 + fprintf (stderr, "\t%s\n", part->name);
207 + avr32_part = &avr32_part_types[PART_TYPE_AVR32_NONE];
208 + }
209 +
210 + avr32_arch = &avr32_arch_types[avr32_part->arch_type];
211 +
212 + /* If part was set to "none" then check if arch was set. */
213 + if (strcmp (avr32_part->name, "none") == 0)
214 + {
215 + /* Check if arch type is set. */
216 + for (arch = avr32_arch_types; arch->name; arch++)
217 + if (strcmp (arch->name, avr32_arch_name) == 0)
218 + break;
219 +
220 + avr32_arch = arch;
221 +
222 + if (!arch->name)
223 + {
224 + fprintf (stderr, "Unknown arch `%s' specified\nKnown arch names:\n",
225 + avr32_arch_name);
226 + for (arch = avr32_arch_types; arch->name; arch++)
227 + fprintf (stderr, "\t%s\n", arch->name);
228 + avr32_arch = &avr32_arch_types[ARCH_TYPE_AVR32_AP];
229 + }
230 + }
231 +
232 + /* If optimization level is two or greater, then align start of loops to a
233 + word boundary since this will allow folding the first insn of the loop.
234 + Do this only for targets supporting branch prediction. */
235 + if (optimize >= 2 && TARGET_BRANCH_PRED)
236 + align_loops = 2;
237 +
238 + if (AVR32_ALWAYS_PIC)
239 + flag_pic = 1;
240 +
241 + if (TARGET_NO_PIC)
242 + flag_pic = 0;
243 +
244 + avr32_add_gc_roots ();
245 +}
246 +
247 +
248 +/*
249 +If defined, a function that outputs the assembler code for entry to a
250 +function. The prologue is responsible for setting up the stack frame,
251 +initializing the frame pointer register, saving registers that must be
252 +saved, and allocating size additional bytes of storage for the
253 +local variables. size is an integer. file is a stdio
254 +stream to which the assembler code should be output.
255 +
256 +The label for the beginning of the function need not be output by this
257 +macro. That has already been done when the macro is run.
258 +
259 +To determine which registers to save, the macro can refer to the array
260 +regs_ever_live: element r is nonzero if hard register
261 +r is used anywhere within the function. This implies the function
262 +prologue should save register r, provided it is not one of the
263 +call-used registers. (TARGET_ASM_FUNCTION_EPILOGUE must likewise use
264 +regs_ever_live.)
265 +
266 +On machines that have ``register windows'', the function entry code does
267 +not save on the stack the registers that are in the windows, even if
268 +they are supposed to be preserved by function calls; instead it takes
269 +appropriate steps to ``push'' the register stack, if any non-call-used
270 +registers are used in the function.
271 +
272 +On machines where functions may or may not have frame-pointers, the
273 +function entry code must vary accordingly; it must set up the frame
274 +pointer if one is wanted, and not otherwise. To determine whether a
275 +frame pointer is wanted, the macro can refer to the variable
276 +frame_pointer_needed. The variable's value will be 1 at run
277 +time in a function that needs a frame pointer. (see Elimination).
278 +
279 +The function entry code is responsible for allocating any stack space
280 +required for the function. This stack space consists of the regions
281 +listed below. In most cases, these regions are allocated in the
282 +order listed, with the last listed region closest to the top of the
283 +stack (the lowest address if STACK_GROWS_DOWNWARD is defined, and
284 +the highest address if it is not defined). You can use a different order
285 +for a machine if doing so is more convenient or required for
286 +compatibility reasons. Except in cases where required by standard
287 +or by a debugger, there is no reason why the stack layout used by GCC
288 +need agree with that used by other compilers for a machine.
289 +*/
290 +
291 +#undef TARGET_ASM_FUNCTION_PROLOGUE
292 +#define TARGET_ASM_FUNCTION_PROLOGUE avr32_target_asm_function_prologue
293 +
294 +
295 +#undef TARGET_DEFAULT_SHORT_ENUMS
296 +#define TARGET_DEFAULT_SHORT_ENUMS hook_bool_void_false
297 +
298 +#undef TARGET_PROMOTE_FUNCTION_ARGS
299 +#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
300 +
301 +#undef TARGET_PROMOTE_FUNCTION_RETURN
302 +#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
303 +
304 +#undef TARGET_PROMOTE_PROTOTYPES
305 +#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
306 +
307 +#undef TARGET_MUST_PASS_IN_STACK
308 +#define TARGET_MUST_PASS_IN_STACK avr32_must_pass_in_stack
309 +
310 +#undef TARGET_PASS_BY_REFERENCE
311 +#define TARGET_PASS_BY_REFERENCE avr32_pass_by_reference
312 +
313 +#undef TARGET_STRICT_ARGUMENT_NAMING
314 +#define TARGET_STRICT_ARGUMENT_NAMING avr32_strict_argument_naming
315 +
316 +#undef TARGET_VECTOR_MODE_SUPPORTED_P
317 +#define TARGET_VECTOR_MODE_SUPPORTED_P avr32_vector_mode_supported
318 +
319 +#undef TARGET_RETURN_IN_MEMORY
320 +#define TARGET_RETURN_IN_MEMORY avr32_return_in_memory
321 +
322 +#undef TARGET_RETURN_IN_MSB
323 +#define TARGET_RETURN_IN_MSB avr32_return_in_msb
324 +
325 +#undef TARGET_ARG_PARTIAL_BYTES
326 +#define TARGET_ARG_PARTIAL_BYTES avr32_arg_partial_bytes
327 +
328 +#undef TARGET_STRIP_NAME_ENCODING
329 +#define TARGET_STRIP_NAME_ENCODING avr32_strip_name_encoding
330 +
331 +#define streq(string1, string2) (strcmp (string1, string2) == 0)
332 +
333 +#undef TARGET_ATTRIBUTE_TABLE
334 +#define TARGET_ATTRIBUTE_TABLE avr32_attribute_table
335 +
336 +#undef TARGET_COMP_TYPE_ATTRIBUTES
337 +#define TARGET_COMP_TYPE_ATTRIBUTES avr32_comp_type_attributes
338 +
339 +
340 +#undef TARGET_RTX_COSTS
341 +#define TARGET_RTX_COSTS avr32_rtx_costs
342 +
343 +#undef TARGET_CANNOT_FORCE_CONST_MEM
344 +#define TARGET_CANNOT_FORCE_CONST_MEM avr32_cannot_force_const_mem
345 +
346 +#undef TARGET_ASM_INTEGER
347 +#define TARGET_ASM_INTEGER avr32_assemble_integer
348 +
349 +/*
350 + * Switches to the appropriate section for output of constant pool
351 + * entry x in mode. You can assume that x is some kind of constant in
352 + * RTL. The argument mode is redundant except in the case of a
353 + * const_int rtx. Select the section by calling readonly_data_section
354 + * or one of the alternatives for other sections. align is the
355 + * constant alignment in bits.
356 + *
357 + * The default version of this function takes care of putting symbolic
358 + * constants in flag_pic mode in data_section and everything else in
359 + * readonly_data_section.
360 + */
361 +#undef TARGET_ASM_SELECT_RTX_SECTION
362 +#define TARGET_ASM_SELECT_RTX_SECTION avr32_select_rtx_section
363 +
364 +
365 +/*
366 + * If non-null, this hook performs a target-specific pass over the
367 + * instruction stream. The compiler will run it at all optimization
368 + * levels, just before the point at which it normally does
369 + * delayed-branch scheduling.
370 + *
371 + * The exact purpose of the hook varies from target to target. Some
372 + * use it to do transformations that are necessary for correctness,
373 + * such as laying out in-function constant pools or avoiding hardware
374 + * hazards. Others use it as an opportunity to do some
375 + * machine-dependent optimizations.
376 + *
377 + * You need not implement the hook if it has nothing to do. The
378 + * default definition is null.
379 + */
380 +#undef TARGET_MACHINE_DEPENDENT_REORG
381 +#define TARGET_MACHINE_DEPENDENT_REORG avr32_reorg
382 +
383 +/* Target hook for assembling integer objects.
384 + Need to handle integer vectors */
385 +static bool
386 +avr32_assemble_integer (rtx x, unsigned int size, int aligned_p)
387 +{
388 + if (avr32_vector_mode_supported (GET_MODE (x)))
389 + {
390 + int i, units;
391 +
392 + if (GET_CODE (x) != CONST_VECTOR)
393 + abort ();
394 +
395 + units = CONST_VECTOR_NUNITS (x);
396 +
397 + switch (GET_MODE (x))
398 + {
399 + case V2HImode:
400 + size = 2;
401 + break;
402 + case V4QImode:
403 + size = 1;
404 + break;
405 + default:
406 + abort ();
407 + }
408 +
409 + for (i = 0; i < units; i++)
410 + {
411 + rtx elt;
412 +
413 + elt = CONST_VECTOR_ELT (x, i);
414 + assemble_integer (elt, size, i == 0 ? 32 : size * BITS_PER_UNIT, 1);
415 + }
416 +
417 + return true;
418 + }
419 +
420 + return default_assemble_integer (x, size, aligned_p);
421 +}
422 +
423 +/*
424 + * This target hook describes the relative costs of RTL expressions.
425 + *
426 + * The cost may depend on the precise form of the expression, which is
427 + * available for examination in x, and the rtx code of the expression
428 + * in which it is contained, found in outer_code. code is the
429 + * expression code--redundant, since it can be obtained with GET_CODE
430 + * (x).
431 + *
432 + * In implementing this hook, you can use the construct COSTS_N_INSNS
433 + * (n) to specify a cost equal to n fast instructions.
434 + *
435 + * On entry to the hook, *total contains a default estimate for the
436 + * cost of the expression. The hook should modify this value as
437 + * necessary. Traditionally, the default costs are COSTS_N_INSNS (5)
438 + * for multiplications, COSTS_N_INSNS (7) for division and modulus
439 + * operations, and COSTS_N_INSNS (1) for all other operations.
440 + *
441 + * When optimizing for code size, i.e. when optimize_size is non-zero,
442 + * this target hook should be used to estimate the relative size cost
443 + * of an expression, again relative to COSTS_N_INSNS.
444 + *
445 + * The hook returns true when all subexpressions of x have been
446 + * processed, and false when rtx_cost should recurse.
447 + */
448 +
449 +/* Worker routine for avr32_rtx_costs. */
450 +static inline int
451 +avr32_rtx_costs_1 (rtx x, enum rtx_code code ATTRIBUTE_UNUSED,
452 + enum rtx_code outer ATTRIBUTE_UNUSED)
453 +{
454 + enum machine_mode mode = GET_MODE (x);
455 +
456 + switch (GET_CODE (x))
457 + {
458 + case MEM:
459 + /* Using pre decrement / post increment memory operations on the
460 + avr32_uc architecture means that two writebacks must be performed
461 + and hence two cycles are needed. */
462 + if (!optimize_size
463 + && GET_MODE_SIZE (mode) <= 2 * UNITS_PER_WORD
464 + && avr32_arch->arch_type == ARCH_TYPE_AVR32_UC
465 + && (GET_CODE (XEXP (x, 0)) == PRE_DEC
466 + || GET_CODE (XEXP (x, 0)) == POST_INC))
467 + return COSTS_N_INSNS (4);
468 +
469 + /* Memory costs quite a lot for the first word, but subsequent words
470 + load at the equivalent of a single insn each. */
471 + if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
472 + return COSTS_N_INSNS (2 + (GET_MODE_SIZE (mode) / UNITS_PER_WORD));
473 +
474 + return COSTS_N_INSNS (3);
475 + case SYMBOL_REF:
476 + case CONST:
477 + /* These are valid for the pseudo insns: lda.w and call, which operate
478 + on direct addresses. We assume that the cost of a lda.w is the same
479 + as the cost of a ld.w insn. */
480 + return (outer == SET) ? COSTS_N_INSNS (3) : COSTS_N_INSNS (1);
481 + case DIV:
482 + case MOD:
483 + case UDIV:
484 + case UMOD:
485 + return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
486 +
487 + case ROTATE:
488 + case ROTATERT:
489 + if (mode == TImode)
490 + return COSTS_N_INSNS (100);
491 +
492 + if (mode == DImode)
493 + return COSTS_N_INSNS (10);
494 + return COSTS_N_INSNS (4);
495 + case ASHIFT:
496 + case LSHIFTRT:
497 + case ASHIFTRT:
498 + case NOT:
499 + if (mode == TImode)
500 + return COSTS_N_INSNS (10);
501 +
502 + if (mode == DImode)
503 + return COSTS_N_INSNS (4);
504 + return COSTS_N_INSNS (1);
505 + case PLUS:
506 + case MINUS:
507 + case NEG:
508 + case COMPARE:
509 + case ABS:
510 + if (GET_MODE_CLASS (mode) == MODE_FLOAT)
511 + return COSTS_N_INSNS (100);
512 +
513 + if (mode == TImode)
514 + return COSTS_N_INSNS (50);
515 +
516 + if (mode == DImode)
517 + return COSTS_N_INSNS (2);
518 + return COSTS_N_INSNS (1);
519 +
520 + case MULT:
521 + {
522 + if (GET_MODE_CLASS (mode) == MODE_FLOAT)
523 + return COSTS_N_INSNS (300);
524 +
525 + if (mode == TImode)
526 + return COSTS_N_INSNS (16);
527 +
528 + if (mode == DImode)
529 + return COSTS_N_INSNS (4);
530 +
531 + if (mode == HImode)
532 + return COSTS_N_INSNS (2);
533 +
534 + return COSTS_N_INSNS (3);
535 + }
536 + case IF_THEN_ELSE:
537 + if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
538 + return COSTS_N_INSNS (4);
539 + return COSTS_N_INSNS (1);
540 + case SIGN_EXTEND:
541 + case ZERO_EXTEND:
542 + /* Sign/zero extensions of registers cost quite a lot since these
543 + instructions only take one register operand, which means that gcc
544 + often must insert some move instructions. */
545 + if (mode == QImode || mode == HImode)
546 + return (COSTS_N_INSNS (GET_CODE (XEXP (x, 0)) == MEM ? 0 : 1));
547 + return COSTS_N_INSNS (4);
548 + case UNSPEC:
549 + /* divmod operations */
550 + if (XINT (x, 1) == UNSPEC_UDIVMODSI4_INTERNAL
551 + || XINT (x, 1) == UNSPEC_DIVMODSI4_INTERNAL)
552 + {
553 + return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
554 + }
555 + /* Fallthrough */
556 + default:
557 + return COSTS_N_INSNS (1);
558 + }
559 +}
560 +
561 +static bool
562 +avr32_rtx_costs (rtx x, int code, int outer_code, int *total)
563 +{
564 + *total = avr32_rtx_costs_1 (x, code, outer_code);
565 + return true;
566 +}
567 +
568 +
569 +bool
570 +avr32_cannot_force_const_mem (rtx x ATTRIBUTE_UNUSED)
571 +{
572 + /* Do not want symbols in the constant pool when compiling pic or if using
573 + address pseudo instructions. */
574 + return ((flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
575 + && avr32_find_symbol (x) != NULL_RTX);
576 +}
577 +
578 +
579 +/* Table of machine attributes. */
580 +const struct attribute_spec avr32_attribute_table[] = {
581 + /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
582 + /* Interrupt Service Routines have special prologue and epilogue
583 + requirements. */
584 + {"isr", 0, 1, false, false, false, avr32_handle_isr_attribute},
585 + {"interrupt", 0, 1, false, false, false, avr32_handle_isr_attribute},
586 + {"acall", 0, 1, false, true, true, avr32_handle_acall_attribute},
587 + {"naked", 0, 0, true, false, false, avr32_handle_fndecl_attribute},
588 + {NULL, 0, 0, false, false, false, NULL}
589 +};
590 +
591 +
592 +typedef struct
593 +{
594 + const char *const arg;
595 + const unsigned long return_value;
596 +}
597 +isr_attribute_arg;
598 +
599 +static const isr_attribute_arg isr_attribute_args[] = {
600 + {"FULL", AVR32_FT_ISR_FULL},
601 + {"full", AVR32_FT_ISR_FULL},
602 + {"HALF", AVR32_FT_ISR_HALF},
603 + {"half", AVR32_FT_ISR_HALF},
604 + {"NONE", AVR32_FT_ISR_NONE},
605 + {"none", AVR32_FT_ISR_NONE},
606 + {"UNDEF", AVR32_FT_ISR_NONE},
607 + {"undef", AVR32_FT_ISR_NONE},
608 + {"SWI", AVR32_FT_ISR_NONE},
609 + {"swi", AVR32_FT_ISR_NONE},
610 + {NULL, AVR32_FT_ISR_NONE}
611 +};
612 +
613 +/* Returns the (interrupt) function type of the current
614 + function, or AVR32_FT_UNKNOWN if the type cannot be determined. */
615 +
616 +static unsigned long
617 +avr32_isr_value (tree argument)
618 +{
619 + const isr_attribute_arg *ptr;
620 + const char *arg;
621 +
622 + /* No argument - default to ISR_NONE. */
623 + if (argument == NULL_TREE)
624 + return AVR32_FT_ISR_NONE;
625 +
626 + /* Get the value of the argument. */
627 + if (TREE_VALUE (argument) == NULL_TREE
628 + || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
629 + return AVR32_FT_UNKNOWN;
630 +
631 + arg = TREE_STRING_POINTER (TREE_VALUE (argument));
632 +
633 + /* Check it against the list of known arguments. */
634 + for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
635 + if (streq (arg, ptr->arg))
636 + return ptr->return_value;
637 +
638 + /* An unrecognized interrupt type. */
639 + return AVR32_FT_UNKNOWN;
640 +}
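For illustration only (not part of the patch): the "isr"/"interrupt" attribute registered in avr32_attribute_table, together with the argument strings accepted by avr32_isr_value above, would be placed on a handler roughly as follows; the function name and the "half" argument are placeholders:

    /* Hypothetical interrupt handler; "full", "half", "none", etc. are the
       strings recognized by avr32_isr_value.  */
    void __attribute__ ((interrupt ("half")))
    timer_irq_handler (void)
    {
      /* acknowledge and service the interrupt */
    }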
641 +
642 +
643 +
644 +/*
645 +These hooks specify assembly directives for creating certain kinds
646 +of integer object. The TARGET_ASM_BYTE_OP directive creates a
647 +byte-sized object, the TARGET_ASM_ALIGNED_HI_OP one creates an
648 +aligned two-byte object, and so on. Any of the hooks may be
649 +NULL, indicating that no suitable directive is available.
650 +
651 +The compiler will print these strings at the start of a new line,
652 +followed immediately by the object's initial value. In most cases,
653 +the string should contain a tab, a pseudo-op, and then another tab.
654 +*/
655 +#undef TARGET_ASM_BYTE_OP
656 +#define TARGET_ASM_BYTE_OP "\t.byte\t"
657 +#undef TARGET_ASM_ALIGNED_HI_OP
658 +#define TARGET_ASM_ALIGNED_HI_OP "\t.align 1\n\t.short\t"
659 +#undef TARGET_ASM_ALIGNED_SI_OP
660 +#define TARGET_ASM_ALIGNED_SI_OP "\t.align 2\n\t.int\t"
661 +#undef TARGET_ASM_ALIGNED_DI_OP
662 +#define TARGET_ASM_ALIGNED_DI_OP NULL
663 +#undef TARGET_ASM_ALIGNED_TI_OP
664 +#define TARGET_ASM_ALIGNED_TI_OP NULL
665 +#undef TARGET_ASM_UNALIGNED_HI_OP
666 +#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
667 +#undef TARGET_ASM_UNALIGNED_SI_OP
668 +#define TARGET_ASM_UNALIGNED_SI_OP "\t.int\t"
669 +#undef TARGET_ASM_UNALIGNED_DI_OP
670 +#define TARGET_ASM_UNALIGNED_DI_OP NULL
671 +#undef TARGET_ASM_UNALIGNED_TI_OP
672 +#define TARGET_ASM_UNALIGNED_TI_OP NULL
673 +
674 +#undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
675 +#define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE avr32_sched_use_dfa_pipeline_interface
676 +
677 +#undef TARGET_ASM_OUTPUT_MI_THUNK
678 +#define TARGET_ASM_OUTPUT_MI_THUNK avr32_output_mi_thunk
679 +
680 +
681 +static void
682 +avr32_output_mi_thunk (FILE * file,
683 + tree thunk ATTRIBUTE_UNUSED,
684 + HOST_WIDE_INT delta,
685 + HOST_WIDE_INT vcall_offset, tree function)
686 +{
687 + int mi_delta = delta;
688 + int this_regno =
689 + (avr32_return_in_memory (DECL_RESULT (function), TREE_TYPE (function)) ?
690 + INTERNAL_REGNUM (11) : INTERNAL_REGNUM (12));
691 +
692 +
693 + if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
694 + || vcall_offset)
695 + {
696 + fprintf (file, "\tpushm\tr10\n");
697 + }
698 +
699 +
700 + if (mi_delta != 0)
701 + {
702 + if (avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21"))
703 + {
704 + fprintf (file, "\tsub\t%s, -0x%x\n", reg_names[this_regno],
705 + mi_delta);
706 + }
707 + else
708 + {
709 + /* Immediate is larger than k21; we must free up a temporary register
710 + by pushing a register onto the stack. */
711 + fprintf (file, "\tmov\tr10, lo(%x)\n", mi_delta);
712 + fprintf (file, "\torh\tr10, hi(%x)\n", mi_delta);
713 + fprintf (file, "\tadd\t%s, r10\n", reg_names[this_regno]);
714 + }
715 + }
716 +
717 +
718 + if (vcall_offset != 0)
719 + {
720 + fprintf (file, "\tld.w\tr10, %s[0]\n", reg_names[this_regno]);
721 + fprintf (file, "\tld.w\tr10, r10[%i]\n", (int) vcall_offset);
722 + fprintf (file, "\tadd\t%s, r10\n", reg_names[this_regno]);
723 + }
724 +
725 +
726 + if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
727 + || vcall_offset)
728 + {
729 + fprintf (file, "\tpopm\tr10\n");
730 + }
731 +
732 + if (flag_pic)
733 + {
734 + /* Don't know how we should do this!!! For now we'll just use an
735 + extended branch instruction and hope that the function will be
736 + reached. */
737 + fprintf (file, "\tbral\t");
738 + assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
739 + fputc ('\n', file);
740 + }
741 + else
742 + {
743 + fprintf (file, "\tlddpc\tpc, 0f\n");
744 + fprintf (file, "\t.align 2\n");
745 + fputs ("0:\t.long\t", file);
746 + assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
747 + fputc ('\n', file);
748 + }
749 +}
750 +
751 +/* Implements target hook vector_mode_supported. */
752 +bool
753 +avr32_vector_mode_supported (enum machine_mode mode)
754 +{
755 + if ((mode == V2HImode) || (mode == V4QImode))
756 + return true;
757 +
758 + return false;
759 +}
760 +
761 +
762 +#undef TARGET_INIT_LIBFUNCS
763 +#define TARGET_INIT_LIBFUNCS avr32_init_libfuncs
764 +
765 +#undef TARGET_INIT_BUILTINS
766 +#define TARGET_INIT_BUILTINS avr32_init_builtins
767 +
768 +#undef TARGET_EXPAND_BUILTIN
769 +#define TARGET_EXPAND_BUILTIN avr32_expand_builtin
770 +
771 +tree int_ftype_int, int_ftype_void, short_ftype_short, void_ftype_int_int,
772 + void_ftype_ptr_int;
773 +tree void_ftype_int, void_ftype_void, int_ftype_ptr_int;
774 +tree short_ftype_short, int_ftype_int_short, int_ftype_short_short,
775 + short_ftype_short_short;
776 +tree int_ftype_int_int, longlong_ftype_int_short, longlong_ftype_short_short;
777 +tree void_ftype_int_int_int_int_int, void_ftype_int_int_int;
778 +tree longlong_ftype_int_int, void_ftype_int_int_longlong;
779 +tree int_ftype_int_int_int, longlong_ftype_longlong_int_short;
780 +tree longlong_ftype_longlong_short_short, int_ftype_int_short_short;
781 +
782 +#define def_builtin(NAME, TYPE, CODE) \
783 + lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
784 + BUILT_IN_MD, NULL, NULL_TREE)
785 +
786 +#define def_mbuiltin(MASK, NAME, TYPE, CODE) \
787 + do \
788 + { \
789 + if ((MASK)) \
790 + lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
791 + BUILT_IN_MD, NULL, NULL_TREE); \
792 + } \
793 + while (0)
794 +
795 +struct builtin_description
796 +{
797 + const unsigned int mask;
798 + const enum insn_code icode;
799 + const char *const name;
800 + const int code;
801 + const enum rtx_code comparison;
802 + const unsigned int flag;
803 + const tree *ftype;
804 +};
805 +
806 +static const struct builtin_description bdesc_2arg[] = {
807 +#define DSP_BUILTIN(code, builtin, ftype) \
808 + { 1, CODE_FOR_##code, "__builtin_" #code , \
809 + AVR32_BUILTIN_##builtin, 0, 0, ftype }
810 +
811 + DSP_BUILTIN (mulsathh_h, MULSATHH_H, &short_ftype_short_short),
812 + DSP_BUILTIN (mulsathh_w, MULSATHH_W, &int_ftype_short_short),
813 + DSP_BUILTIN (mulsatrndhh_h, MULSATRNDHH_H, &short_ftype_short_short),
814 + DSP_BUILTIN (mulsatrndwh_w, MULSATRNDWH_W, &int_ftype_int_short),
815 + DSP_BUILTIN (mulsatwh_w, MULSATWH_W, &int_ftype_int_short),
816 + DSP_BUILTIN (satadd_h, SATADD_H, &short_ftype_short_short),
817 + DSP_BUILTIN (satsub_h, SATSUB_H, &short_ftype_short_short),
818 + DSP_BUILTIN (satadd_w, SATADD_W, &int_ftype_int_int),
819 + DSP_BUILTIN (satsub_w, SATSUB_W, &int_ftype_int_int),
820 + DSP_BUILTIN (mulwh_d, MULWH_D, &longlong_ftype_int_short),
821 + DSP_BUILTIN (mulnwh_d, MULNWH_D, &longlong_ftype_int_short)
822 +};
823 +
824 +
825 +void
826 +avr32_init_builtins (void)
827 +{
828 + unsigned int i;
829 + const struct builtin_description *d;
830 + tree endlink = void_list_node;
831 + tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
832 + tree longlong_endlink =
833 + tree_cons (NULL_TREE, long_long_integer_type_node, endlink);
834 + tree short_endlink =
835 + tree_cons (NULL_TREE, short_integer_type_node, endlink);
836 + tree void_endlink = tree_cons (NULL_TREE, void_type_node, endlink);
837 +
838 + /* int func (int) */
839 + int_ftype_int = build_function_type (integer_type_node, int_endlink);
840 +
841 + /* short func (short) */
842 + short_ftype_short
843 + = build_function_type (short_integer_type_node, short_endlink);
844 +
845 + /* short func (short, short) */
846 + short_ftype_short_short
847 + = build_function_type (short_integer_type_node,
848 + tree_cons (NULL_TREE, short_integer_type_node,
849 + short_endlink));
850 +
851 + /* long long func (long long, short, short) */
852 + longlong_ftype_longlong_short_short
853 + = build_function_type (long_long_integer_type_node,
854 + tree_cons (NULL_TREE, long_long_integer_type_node,
855 + tree_cons (NULL_TREE,
856 + short_integer_type_node,
857 + short_endlink)));
858 +
859 + /* long long func (short, short) */
860 + longlong_ftype_short_short
861 + = build_function_type (long_long_integer_type_node,
862 + tree_cons (NULL_TREE, short_integer_type_node,
863 + short_endlink));
864 +
865 + /* int func (int, int) */
866 + int_ftype_int_int
867 + = build_function_type (integer_type_node,
868 + tree_cons (NULL_TREE, integer_type_node,
869 + int_endlink));
870 +
871 + /* long long func (int, int) */
872 + longlong_ftype_int_int
873 + = build_function_type (long_long_integer_type_node,
874 + tree_cons (NULL_TREE, integer_type_node,
875 + int_endlink));
876 +
877 + /* long long int func (long long, int, short) */
878 + longlong_ftype_longlong_int_short
879 + = build_function_type (long_long_integer_type_node,
880 + tree_cons (NULL_TREE, long_long_integer_type_node,
881 + tree_cons (NULL_TREE, integer_type_node,
882 + short_endlink)));
883 +
884 + /* long long int func (int, short) */
885 + longlong_ftype_int_short
886 + = build_function_type (long_long_integer_type_node,
887 + tree_cons (NULL_TREE, integer_type_node,
888 + short_endlink));
889 +
890 + /* int func (int, short, short) */
891 + int_ftype_int_short_short
892 + = build_function_type (integer_type_node,
893 + tree_cons (NULL_TREE, integer_type_node,
894 + tree_cons (NULL_TREE,
895 + short_integer_type_node,
896 + short_endlink)));
897 +
898 + /* int func (short, short) */
899 + int_ftype_short_short
900 + = build_function_type (integer_type_node,
901 + tree_cons (NULL_TREE, short_integer_type_node,
902 + short_endlink));
903 +
904 + /* int func (int, short) */
905 + int_ftype_int_short
906 + = build_function_type (integer_type_node,
907 + tree_cons (NULL_TREE, integer_type_node,
908 + short_endlink));
909 +
910 + /* void func (int, int) */
911 + void_ftype_int_int
912 + = build_function_type (void_type_node,
913 + tree_cons (NULL_TREE, integer_type_node,
914 + int_endlink));
915 +
916 + /* void func (int, int, int) */
917 + void_ftype_int_int_int
918 + = build_function_type (void_type_node,
919 + tree_cons (NULL_TREE, integer_type_node,
920 + tree_cons (NULL_TREE, integer_type_node,
921 + int_endlink)));
922 +
923 + /* void func (int, int, long long) */
924 + void_ftype_int_int_longlong
925 + = build_function_type (void_type_node,
926 + tree_cons (NULL_TREE, integer_type_node,
927 + tree_cons (NULL_TREE, integer_type_node,
928 + longlong_endlink)));
929 +
930 + /* void func (int, int, int, int, int) */
931 + void_ftype_int_int_int_int_int
932 + = build_function_type (void_type_node,
933 + tree_cons (NULL_TREE, integer_type_node,
934 + tree_cons (NULL_TREE, integer_type_node,
935 + tree_cons (NULL_TREE,
936 + integer_type_node,
937 + tree_cons
938 + (NULL_TREE,
939 + integer_type_node,
940 + int_endlink)))));
941 +
942 + /* void func (void *, int) */
943 + void_ftype_ptr_int
944 + = build_function_type (void_type_node,
945 + tree_cons (NULL_TREE, ptr_type_node, int_endlink));
946 +
947 + /* void func (int) */
948 + void_ftype_int = build_function_type (void_type_node, int_endlink);
949 +
950 + /* void func (void) */
951 + void_ftype_void = build_function_type (void_type_node, void_endlink);
952 +
953 + /* int func (void) */
954 + int_ftype_void = build_function_type (integer_type_node, void_endlink);
955 +
956 + /* int func (void *, int) */
957 + int_ftype_ptr_int
958 + = build_function_type (integer_type_node,
959 + tree_cons (NULL_TREE, ptr_type_node, int_endlink));
960 +
961 + /* int func (int, int, int) */
962 + int_ftype_int_int_int
963 + = build_function_type (integer_type_node,
964 + tree_cons (NULL_TREE, integer_type_node,
965 + tree_cons (NULL_TREE, integer_type_node,
966 + int_endlink)));
967 +
968 + /* Initialize avr32 builtins. */
969 + def_builtin ("__builtin_mfsr", int_ftype_int, AVR32_BUILTIN_MFSR);
970 + def_builtin ("__builtin_mtsr", void_ftype_int_int, AVR32_BUILTIN_MTSR);
971 + def_builtin ("__builtin_mfdr", int_ftype_int, AVR32_BUILTIN_MFDR);
972 + def_builtin ("__builtin_mtdr", void_ftype_int_int, AVR32_BUILTIN_MTDR);
973 + def_builtin ("__builtin_cache", void_ftype_ptr_int, AVR32_BUILTIN_CACHE);
974 + def_builtin ("__builtin_sync", void_ftype_int, AVR32_BUILTIN_SYNC);
975 + def_builtin ("__builtin_tlbr", void_ftype_void, AVR32_BUILTIN_TLBR);
976 + def_builtin ("__builtin_tlbs", void_ftype_void, AVR32_BUILTIN_TLBS);
977 + def_builtin ("__builtin_tlbw", void_ftype_void, AVR32_BUILTIN_TLBW);
978 + def_builtin ("__builtin_breakpoint", void_ftype_void,
979 + AVR32_BUILTIN_BREAKPOINT);
980 + def_builtin ("__builtin_xchg", int_ftype_ptr_int, AVR32_BUILTIN_XCHG);
981 + def_builtin ("__builtin_ldxi", int_ftype_ptr_int, AVR32_BUILTIN_LDXI);
982 + def_builtin ("__builtin_bswap_16", short_ftype_short,
983 + AVR32_BUILTIN_BSWAP16);
984 + def_builtin ("__builtin_bswap_32", int_ftype_int, AVR32_BUILTIN_BSWAP32);
985 + def_builtin ("__builtin_cop", void_ftype_int_int_int_int_int,
986 + AVR32_BUILTIN_COP);
987 + def_builtin ("__builtin_mvcr_w", int_ftype_int_int, AVR32_BUILTIN_MVCR_W);
988 + def_builtin ("__builtin_mvrc_w", void_ftype_int_int_int,
989 + AVR32_BUILTIN_MVRC_W);
990 + def_builtin ("__builtin_mvcr_d", longlong_ftype_int_int,
991 + AVR32_BUILTIN_MVCR_D);
992 + def_builtin ("__builtin_mvrc_d", void_ftype_int_int_longlong,
993 + AVR32_BUILTIN_MVRC_D);
994 + def_builtin ("__builtin_sats", int_ftype_int_int_int, AVR32_BUILTIN_SATS);
995 + def_builtin ("__builtin_satu", int_ftype_int_int_int, AVR32_BUILTIN_SATU);
996 + def_builtin ("__builtin_satrnds", int_ftype_int_int_int,
997 + AVR32_BUILTIN_SATRNDS);
998 + def_builtin ("__builtin_satrndu", int_ftype_int_int_int,
999 + AVR32_BUILTIN_SATRNDU);
1000 + def_builtin ("__builtin_musfr", void_ftype_int, AVR32_BUILTIN_MUSFR);
1001 + def_builtin ("__builtin_mustr", int_ftype_void, AVR32_BUILTIN_MUSTR);
1002 + def_builtin ("__builtin_macsathh_w", int_ftype_int_short_short,
1003 + AVR32_BUILTIN_MACSATHH_W);
1004 + def_builtin ("__builtin_macwh_d", longlong_ftype_longlong_int_short,
1005 + AVR32_BUILTIN_MACWH_D);
1006 + def_builtin ("__builtin_machh_d", longlong_ftype_longlong_short_short,
1007 + AVR32_BUILTIN_MACHH_D);
1008 +
1009 + /* Add all builtins that are more or less simple operations on two
1010 + operands. */
1011 + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
1012 + {
1013 + /* Use one of the operands; the target can have a different mode for
1014 + mask-generating compares. */
1015 +
1016 + if (d->name == 0)
1017 + continue;
1018 +
1019 + def_mbuiltin (d->mask, d->name, *(d->ftype), d->code);
1020 + }
1021 +}
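As a rough usage sketch (not part of the patch), the builtins registered above are called directly from C with the function types built here; __builtin_mfsr has type int (int) and __builtin_mtsr has type void (int, int), and both require a constant register number. The register number 0 below is only a placeholder:

    /* Hypothetical use of two of the builtins defined in
       avr32_init_builtins; compiles only with the AVR32 target compiler.  */
    static int
    read_then_set_bit0 (void)
    {
      int value = __builtin_mfsr (0);   /* read a system register        */
      __builtin_mtsr (0, value | 1);    /* write it back with bit 0 set  */
      return value;
    }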
1022 +
1023 +
1024 +/* Subroutine of avr32_expand_builtin to take care of binop insns. */
1025 +
1026 +static rtx
1027 +avr32_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
1028 +{
1029 + rtx pat;
1030 + tree arg0 = TREE_VALUE (arglist);
1031 + tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1032 + rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1033 + rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1034 + enum machine_mode tmode = insn_data[icode].operand[0].mode;
1035 + enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1036 + enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1037 +
1038 + if (!target
1039 + || GET_MODE (target) != tmode
1040 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1041 + target = gen_reg_rtx (tmode);
1042 +
1043 + /* In case the insn wants input operands in modes different from the
1044 + result, abort. */
1045 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1046 + {
1047 + /* If op0 is already a reg we must cast it to the correct mode. */
1048 + if (REG_P (op0))
1049 + op0 = convert_to_mode (mode0, op0, 1);
1050 + else
1051 + op0 = copy_to_mode_reg (mode0, op0);
1052 + }
1053 + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
1054 + {
1055 + /* If op1 is already a reg we must cast it to the correct mode. */
1056 + if (REG_P (op1))
1057 + op1 = convert_to_mode (mode1, op1, 1);
1058 + else
1059 + op1 = copy_to_mode_reg (mode1, op1);
1060 + }
1061 + pat = GEN_FCN (icode) (target, op0, op1);
1062 + if (!pat)
1063 + return 0;
1064 + emit_insn (pat);
1065 + return target;
1066 +}
1067 +
1068 +/* Expand an expression EXP that calls a built-in function,
1069 + with result going to TARGET if that's convenient
1070 + (and in mode MODE if that's convenient).
1071 + SUBTARGET may be used as the target for computing one of EXP's operands.
1072 + IGNORE is nonzero if the value is to be ignored. */
1073 +
1074 +rtx
1075 +avr32_expand_builtin (tree exp,
1076 + rtx target,
1077 + rtx subtarget ATTRIBUTE_UNUSED,
1078 + enum machine_mode mode ATTRIBUTE_UNUSED,
1079 + int ignore ATTRIBUTE_UNUSED)
1080 +{
1081 + const struct builtin_description *d;
1082 + unsigned int i;
1083 + enum insn_code icode;
1084 + tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
1085 + tree arglist = TREE_OPERAND (exp, 1);
1086 + tree arg0, arg1, arg2;
1087 + rtx op0, op1, op2, pat;
1088 + enum machine_mode tmode, mode0, mode1;
1089 + enum machine_mode arg0_mode;
1090 + int fcode = DECL_FUNCTION_CODE (fndecl);
1091 +
1092 + switch (fcode)
1093 + {
1094 + default:
1095 + break;
1096 +
1097 + case AVR32_BUILTIN_SATS:
1098 + case AVR32_BUILTIN_SATU:
1099 + case AVR32_BUILTIN_SATRNDS:
1100 + case AVR32_BUILTIN_SATRNDU:
1101 + {
1102 + const char *fname;
1103 + switch (fcode)
1104 + {
1105 + default:
1106 + case AVR32_BUILTIN_SATS:
1107 + icode = CODE_FOR_sats;
1108 + fname = "sats";
1109 + break;
1110 + case AVR32_BUILTIN_SATU:
1111 + icode = CODE_FOR_satu;
1112 + fname = "satu";
1113 + break;
1114 + case AVR32_BUILTIN_SATRNDS:
1115 + icode = CODE_FOR_satrnds;
1116 + fname = "satrnds";
1117 + break;
1118 + case AVR32_BUILTIN_SATRNDU:
1119 + icode = CODE_FOR_satrndu;
1120 + fname = "satrndu";
1121 + break;
1122 + }
1123 +
1124 + arg0 = TREE_VALUE (arglist);
1125 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1126 + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
1127 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1128 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1129 + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
1130 +
1131 + tmode = insn_data[icode].operand[0].mode;
1132 +
1133 +
1134 + if (target == 0
1135 + || GET_MODE (target) != tmode
1136 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1137 + target = gen_reg_rtx (tmode);
1138 +
1139 +
1140 + if (!(*insn_data[icode].operand[0].predicate) (op0, GET_MODE (op0)))
1141 + {
1142 + op0 = copy_to_mode_reg (insn_data[icode].operand[0].mode, op0);
1143 + }
1144 +
1145 + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
1146 + {
1147 + error ("Parameter 2 to __builtin_%s should be a constant number.",
1148 + fname);
1149 + return NULL_RTX;
1150 + }
1151 +
1152 + if (!(*insn_data[icode].operand[1].predicate) (op2, SImode))
1153 + {
1154 + error ("Parameter 3 to __builtin_%s should be a constant number.",
1155 + fname);
1156 + return NULL_RTX;
1157 + }
1158 +
1159 + emit_move_insn (target, op0);
1160 + pat = GEN_FCN (icode) (target, op1, op2);
1161 + if (!pat)
1162 + return 0;
1163 + emit_insn (pat);
1164 +
1165 + return target;
1166 + }
1167 + case AVR32_BUILTIN_MUSTR:
1168 + icode = CODE_FOR_mustr;
1169 + tmode = insn_data[icode].operand[0].mode;
1170 +
1171 + if (target == 0
1172 + || GET_MODE (target) != tmode
1173 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1174 + target = gen_reg_rtx (tmode);
1175 + pat = GEN_FCN (icode) (target);
1176 + if (!pat)
1177 + return 0;
1178 + emit_insn (pat);
1179 + return target;
1180 +
1181 + case AVR32_BUILTIN_MFSR:
1182 + icode = CODE_FOR_mfsr;
1183 + arg0 = TREE_VALUE (arglist);
1184 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1185 + tmode = insn_data[icode].operand[0].mode;
1186 + mode0 = insn_data[icode].operand[1].mode;
1187 +
1188 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1189 + {
1190 + error ("Parameter 1 to __builtin_mfsr must be a constant number");
1191 + }
1192 +
1193 + if (target == 0
1194 + || GET_MODE (target) != tmode
1195 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1196 + target = gen_reg_rtx (tmode);
1197 + pat = GEN_FCN (icode) (target, op0);
1198 + if (!pat)
1199 + return 0;
1200 + emit_insn (pat);
1201 + return target;
1202 + case AVR32_BUILTIN_MTSR:
1203 + icode = CODE_FOR_mtsr;
1204 + arg0 = TREE_VALUE (arglist);
1205 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1206 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1207 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1208 + mode0 = insn_data[icode].operand[0].mode;
1209 + mode1 = insn_data[icode].operand[1].mode;
1210 +
1211 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1212 + {
1213 + error ("Parameter 1 to __builtin_mtsr must be a constant number");
1214 + return gen_reg_rtx (mode0);
1215 + }
1216 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
1217 + op1 = copy_to_mode_reg (mode1, op1);
1218 + pat = GEN_FCN (icode) (op0, op1);
1219 + if (!pat)
1220 + return 0;
1221 + emit_insn (pat);
1222 + return NULL_RTX;
1223 + case AVR32_BUILTIN_MFDR:
1224 + icode = CODE_FOR_mfdr;
1225 + arg0 = TREE_VALUE (arglist);
1226 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1227 + tmode = insn_data[icode].operand[0].mode;
1228 + mode0 = insn_data[icode].operand[1].mode;
1229 +
1230 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1231 + {
1232 + error ("Parameter 1 to __builtin_mfdr must be a constant number");
1233 + }
1234 +
1235 + if (target == 0
1236 + || GET_MODE (target) != tmode
1237 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1238 + target = gen_reg_rtx (tmode);
1239 + pat = GEN_FCN (icode) (target, op0);
1240 + if (!pat)
1241 + return 0;
1242 + emit_insn (pat);
1243 + return target;
1244 + case AVR32_BUILTIN_MTDR:
1245 + icode = CODE_FOR_mtdr;
1246 + arg0 = TREE_VALUE (arglist);
1247 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1248 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1249 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1250 + mode0 = insn_data[icode].operand[0].mode;
1251 + mode1 = insn_data[icode].operand[1].mode;
1252 +
1253 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1254 + {
1255 + error ("Parameter 1 to __builtin_mtdr must be a constant number");
1256 + return gen_reg_rtx (mode0);
1257 + }
1258 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
1259 + op1 = copy_to_mode_reg (mode1, op1);
1260 + pat = GEN_FCN (icode) (op0, op1);
1261 + if (!pat)
1262 + return 0;
1263 + emit_insn (pat);
1264 + return NULL_RTX;
1265 + case AVR32_BUILTIN_CACHE:
1266 + icode = CODE_FOR_cache;
1267 + arg0 = TREE_VALUE (arglist);
1268 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1269 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1270 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1271 + mode0 = insn_data[icode].operand[0].mode;
1272 + mode1 = insn_data[icode].operand[1].mode;
1273 +
1274 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
1275 + {
1276 + error ("Parameter 2 to __builtin_cache must be a constant number");
1277 + return gen_reg_rtx (mode1);
1278 + }
1279 +
1280 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1281 + op0 = copy_to_mode_reg (mode0, op0);
1282 +
1283 + pat = GEN_FCN (icode) (op0, op1);
1284 + if (!pat)
1285 + return 0;
1286 + emit_insn (pat);
1287 + return NULL_RTX;
1288 + case AVR32_BUILTIN_SYNC:
1289 + case AVR32_BUILTIN_MUSFR:
1290 + {
1291 + const char *fname;
1292 + switch (fcode)
1293 + {
1294 + default:
1295 + case AVR32_BUILTIN_SYNC:
1296 + icode = CODE_FOR_sync;
1297 + fname = "sync";
1298 + break;
1299 + case AVR32_BUILTIN_MUSFR:
1300 + icode = CODE_FOR_musfr;
1301 + fname = "musfr";
1302 + break;
1303 + }
1304 +
1305 + arg0 = TREE_VALUE (arglist);
1306 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1307 + mode0 = insn_data[icode].operand[0].mode;
1308 +
1309 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1310 + {
1311 + if (icode == CODE_FOR_musfr)
1312 + op0 = copy_to_mode_reg (mode0, op0);
1313 + else
1314 + {
1315 + error ("Parameter to __builtin_%s is illegal.", fname);
1316 + return gen_reg_rtx (mode0);
1317 + }
1318 + }
1319 + pat = GEN_FCN (icode) (op0);
1320 + if (!pat)
1321 + return 0;
1322 + emit_insn (pat);
1323 + return NULL_RTX;
1324 + }
1325 + case AVR32_BUILTIN_TLBR:
1326 + icode = CODE_FOR_tlbr;
1327 + pat = GEN_FCN (icode) (NULL_RTX);
1328 + if (!pat)
1329 + return 0;
1330 + emit_insn (pat);
1331 + return NULL_RTX;
1332 + case AVR32_BUILTIN_TLBS:
1333 + icode = CODE_FOR_tlbs;
1334 + pat = GEN_FCN (icode) (NULL_RTX);
1335 + if (!pat)
1336 + return 0;
1337 + emit_insn (pat);
1338 + return NULL_RTX;
1339 + case AVR32_BUILTIN_TLBW:
1340 + icode = CODE_FOR_tlbw;
1341 + pat = GEN_FCN (icode) (NULL_RTX);
1342 + if (!pat)
1343 + return 0;
1344 + emit_insn (pat);
1345 + return NULL_RTX;
1346 + case AVR32_BUILTIN_BREAKPOINT:
1347 + icode = CODE_FOR_breakpoint;
1348 + pat = GEN_FCN (icode) (NULL_RTX);
1349 + if (!pat)
1350 + return 0;
1351 + emit_insn (pat);
1352 + return NULL_RTX;
1353 + case AVR32_BUILTIN_XCHG:
1354 + icode = CODE_FOR_xchg;
1355 + arg0 = TREE_VALUE (arglist);
1356 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1357 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1358 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1359 + tmode = insn_data[icode].operand[0].mode;
1360 + mode0 = insn_data[icode].operand[1].mode;
1361 + mode1 = insn_data[icode].operand[3].mode;
1362 +
1363 + if (!(*insn_data[icode].operand[3].predicate) (op1, mode1))
1364 + {
1365 + op1 = copy_to_mode_reg (mode1, op1);
1366 + }
1367 +
1368 + if (!(*insn_data[icode].operand[2].predicate) (op0, mode0))
1369 + {
1370 + op0 = copy_to_mode_reg (mode0, op0);
1371 + }
1372 +
1373 + if (target == 0
1374 + || GET_MODE (target) != tmode
1375 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1376 + target = gen_reg_rtx (tmode);
1377 + pat = GEN_FCN (icode) (target, op0, op0, op1);
1378 + if (!pat)
1379 + return 0;
1380 + emit_insn (pat);
1381 + return target;
1382 + case AVR32_BUILTIN_LDXI:
1383 + icode = CODE_FOR_ldxi;
1384 + arg0 = TREE_VALUE (arglist);
1385 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1386 + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
1387 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1388 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1389 + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
1390 + tmode = insn_data[icode].operand[0].mode;
1391 + mode0 = insn_data[icode].operand[1].mode;
1392 + mode1 = insn_data[icode].operand[2].mode;
1393 +
1394 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1395 + {
1396 + op0 = copy_to_mode_reg (mode0, op0);
1397 + }
1398 +
1399 + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
1400 + {
1401 + op1 = copy_to_mode_reg (mode1, op1);
1402 + }
1403 +
1404 + if (!(*insn_data[icode].operand[3].predicate) (op2, SImode))
1405 + {
1406 + error
1407 + ("Parameter 3 to __builtin_ldxi must be a valid extract shift operand: (0|8|16|24)");
1408 + return gen_reg_rtx (mode0);
1409 + }
1410 +
1411 + if (target == 0
1412 + || GET_MODE (target) != tmode
1413 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1414 + target = gen_reg_rtx (tmode);
1415 + pat = GEN_FCN (icode) (target, op0, op1, op2);
1416 + if (!pat)
1417 + return 0;
1418 + emit_insn (pat);
1419 + return target;
1420 + case AVR32_BUILTIN_BSWAP16:
1421 + {
1422 + icode = CODE_FOR_bswap_16;
1423 + arg0 = TREE_VALUE (arglist);
1424 + arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
1425 + mode0 = insn_data[icode].operand[1].mode;
1426 + if (arg0_mode != mode0)
1427 + arg0 = build1 (NOP_EXPR,
1428 + (*lang_hooks.types.type_for_mode) (mode0, 0), arg0);
1429 +
1430 + op0 = expand_expr (arg0, NULL_RTX, HImode, 0);
1431 + tmode = insn_data[icode].operand[0].mode;
1432 +
1433 +
1434 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1435 + {
1436 + op0 = copy_to_mode_reg (mode0, op0);
1437 + }
1438 +
1439 + if (target == 0
1440 + || GET_MODE (target) != tmode
1441 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1442 + {
1443 + target = gen_reg_rtx (tmode);
1444 + }
1445 +
1446 +
1447 + pat = GEN_FCN (icode) (target, op0);
1448 + if (!pat)
1449 + return 0;
1450 + emit_insn (pat);
1451 +
1452 + return target;
1453 + }
1454 + case AVR32_BUILTIN_BSWAP32:
1455 + {
1456 + icode = CODE_FOR_bswap_32;
1457 + arg0 = TREE_VALUE (arglist);
1458 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1459 + tmode = insn_data[icode].operand[0].mode;
1460 + mode0 = insn_data[icode].operand[1].mode;
1461 +
1462 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1463 + {
1464 + op0 = copy_to_mode_reg (mode0, op0);
1465 + }
1466 +
1467 + if (target == 0
1468 + || GET_MODE (target) != tmode
1469 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1470 + target = gen_reg_rtx (tmode);
1471 +
1472 +
1473 + pat = GEN_FCN (icode) (target, op0);
1474 + if (!pat)
1475 + return 0;
1476 + emit_insn (pat);
1477 +
1478 + return target;
1479 + }
1480 + case AVR32_BUILTIN_MVCR_W:
1481 + case AVR32_BUILTIN_MVCR_D:
1482 + {
1483 + arg0 = TREE_VALUE (arglist);
1484 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1485 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1486 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1487 +
1488 + if (fcode == AVR32_BUILTIN_MVCR_W)
1489 + icode = CODE_FOR_mvcrsi;
1490 + else
1491 + icode = CODE_FOR_mvcrdi;
1492 +
1493 + tmode = insn_data[icode].operand[0].mode;
1494 +
1495 + if (target == 0
1496 + || GET_MODE (target) != tmode
1497 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1498 + target = gen_reg_rtx (tmode);
1499 +
1500 + if (!(*insn_data[icode].operand[1].predicate) (op0, SImode))
1501 + {
1502 + error
1503 + ("Parameter 1 is not a valid coprocessor number.");
1504 + error ("Number should be between 0 and 7.");
1505 + return NULL_RTX;
1506 + }
1507 +
1508 + if (!(*insn_data[icode].operand[2].predicate) (op1, SImode))
1509 + {
1510 + error
1511 + ("Parameter 2 is not a valid coprocessor register number.");
1512 + error ("Number should be between 0 and 15.");
1513 + return NULL_RTX;
1514 + }
1515 +
1516 + pat = GEN_FCN (icode) (target, op0, op1);
1517 + if (!pat)
1518 + return 0;
1519 + emit_insn (pat);
1520 +
1521 + return target;
1522 + }
1523 + case AVR32_BUILTIN_MACSATHH_W:
1524 + case AVR32_BUILTIN_MACWH_D:
1525 + case AVR32_BUILTIN_MACHH_D:
1526 + {
1527 + arg0 = TREE_VALUE (arglist);
1528 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1529 + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
1530 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1531 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1532 + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
1533 +
1534 + icode = ((fcode == AVR32_BUILTIN_MACSATHH_W) ? CODE_FOR_macsathh_w :
1535 + (fcode == AVR32_BUILTIN_MACWH_D) ? CODE_FOR_macwh_d :
1536 + CODE_FOR_machh_d);
1537 +
1538 + tmode = insn_data[icode].operand[0].mode;
1539 + mode0 = insn_data[icode].operand[1].mode;
1540 + mode1 = insn_data[icode].operand[2].mode;
1541 +
1542 +
1543 + if (!target
1544 + || GET_MODE (target) != tmode
1545 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1546 + target = gen_reg_rtx (tmode);
1547 +
1548 + if (!(*insn_data[icode].operand[0].predicate) (op0, tmode))
1549 + {
1550 + /* If op0 is already a reg we must cast it to the correct mode. */
1551 + if (REG_P (op0))
1552 + op0 = convert_to_mode (tmode, op0, 1);
1553 + else
1554 + op0 = copy_to_mode_reg (tmode, op0);
1555 + }
1556 +
1557 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode0))
1558 + {
1559 + /* If op1 is already a reg we must cast it to the correct mode. */
1560 + if (REG_P (op1))
1561 + op1 = convert_to_mode (mode0, op1, 1);
1562 + else
1563 + op1 = copy_to_mode_reg (mode0, op1);
1564 + }
1565 +
1566 + if (!(*insn_data[icode].operand[2].predicate) (op2, mode1))
1567 + {
1568 + /* If op2 is already a reg we must cast it to the correct mode. */
1569 + if (REG_P (op2))
1570 + op2 = convert_to_mode (mode1, op2, 1);
1571 + else
1572 + op2 = copy_to_mode_reg (mode1, op2);
1573 + }
1574 +
1575 + emit_move_insn (target, op0);
1576 +
1577 + pat = GEN_FCN (icode) (target, op1, op2);
1578 + if (!pat)
1579 + return 0;
1580 + emit_insn (pat);
1581 + return target;
1582 + }
1583 + case AVR32_BUILTIN_MVRC_W:
1584 + case AVR32_BUILTIN_MVRC_D:
1585 + {
1586 + arg0 = TREE_VALUE (arglist);
1587 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1588 + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
1589 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1590 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1591 + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
1592 +
1593 + if (fcode == AVR32_BUILTIN_MVRC_W)
1594 + icode = CODE_FOR_mvrcsi;
1595 + else
1596 + icode = CODE_FOR_mvrcdi;
1597 +
1598 + if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
1599 + {
1600 + error ("Parameter 1 is not a valid coprocessor number.");
1601 + error ("Number should be between 0 and 7.");
1602 + return NULL_RTX;
1603 + }
1604 +
1605 + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
1606 + {
1607 + error ("Parameter 2 is not a valid coprocessor register number.");
1608 + error ("Number should be between 0 and 15.");
1609 + return NULL_RTX;
1610 + }
1611 +
1612 + if (GET_CODE (op2) == CONST_INT
1613 + || GET_CODE (op2) == CONST
1614 + || GET_CODE (op2) == SYMBOL_REF || GET_CODE (op2) == LABEL_REF)
1615 + {
1616 + op2 = force_const_mem (insn_data[icode].operand[2].mode, op2);
1617 + }
1618 +
1619 + if (!(*insn_data[icode].operand[2].predicate) (op2, GET_MODE (op2)))
1620 + op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
1621 +
1622 +
1623 + pat = GEN_FCN (icode) (op0, op1, op2);
1624 + if (!pat)
1625 + return 0;
1626 + emit_insn (pat);
1627 +
1628 + return NULL_RTX;
1629 + }
1630 + case AVR32_BUILTIN_COP:
1631 + {
1632 + rtx op3, op4;
1633 + tree arg3, arg4;
1634 + icode = CODE_FOR_cop;
1635 + arg0 = TREE_VALUE (arglist);
1636 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1637 + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
1638 + arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
1639 + arg4 =
1640 + TREE_VALUE (TREE_CHAIN
1641 + (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist)))));
1642 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1643 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1644 + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
1645 + op3 = expand_expr (arg3, NULL_RTX, VOIDmode, 0);
1646 + op4 = expand_expr (arg4, NULL_RTX, VOIDmode, 0);
1647 +
1648 + if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
1649 + {
1650 + error
1651 + ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
1652 + error ("Number should be between 0 and 7.");
1653 + return NULL_RTX;
1654 + }
1655 +
1656 + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
1657 + {
1658 + error
1659 + ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
1660 + error ("Number should be between 0 and 15.");
1661 + return NULL_RTX;
1662 + }
1663 +
1664 + if (!(*insn_data[icode].operand[2].predicate) (op2, SImode))
1665 + {
1666 + error
1667 + ("Parameter 3 to __builtin_cop is not a valid coprocessor register number.");
1668 + error ("Number should be between 0 and 15.");
1669 + return NULL_RTX;
1670 + }
1671 +
1672 + if (!(*insn_data[icode].operand[3].predicate) (op3, SImode))
1673 + {
1674 + error
1675 + ("Parameter 4 to __builtin_cop is not a valid coprocessor register number.");
1676 + error ("Number should be between 0 and 15.");
1677 + return NULL_RTX;
1678 + }
1679 +
1680 + if (!(*insn_data[icode].operand[4].predicate) (op4, SImode))
1681 + {
1682 + error
1683 + ("Parameter 5 to __builtin_cop is not a valid coprocessor operation.");
1684 + error ("Number should be between 0 and 127.");
1685 + return NULL_RTX;
1686 + }
1687 +
1688 + pat = GEN_FCN (icode) (op0, op1, op2, op3, op4);
1689 + if (!pat)
1690 + return 0;
1691 + emit_insn (pat);
1692 +
1693 + return target;
1694 + }
1695 + }
1696 +
1697 + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
1698 + if (d->code == fcode)
1699 + return avr32_expand_binop_builtin (d->icode, arglist, target);
1700 +
1701 +
1702 + /* @@@ Should really do something sensible here. */
1703 + return NULL_RTX;
1704 +}
1705 +
1706 +
1707 +/* Handle an "interrupt" or "isr" attribute;
1708 + arguments as in struct attribute_spec.handler. */
1709 +
1710 +static tree
1711 +avr32_handle_isr_attribute (tree * node, tree name, tree args,
1712 + int flags, bool * no_add_attrs)
1713 +{
1714 + if (DECL_P (*node))
1715 + {
1716 + if (TREE_CODE (*node) != FUNCTION_DECL)
1717 + {
1718 + warning ("`%s' attribute only applies to functions",
1719 + IDENTIFIER_POINTER (name));
1720 + *no_add_attrs = true;
1721 + }
1722 + /* FIXME: the argument if any is checked for type attributes; should it
1723 + be checked for decl ones? */
1724 + }
1725 + else
1726 + {
1727 + if (TREE_CODE (*node) == FUNCTION_TYPE
1728 + || TREE_CODE (*node) == METHOD_TYPE)
1729 + {
1730 + if (avr32_isr_value (args) == AVR32_FT_UNKNOWN)
1731 + {
1732 + warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
1733 + *no_add_attrs = true;
1734 + }
1735 + }
1736 + else if (TREE_CODE (*node) == POINTER_TYPE
1737 + && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
1738 + || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
1739 + && avr32_isr_value (args) != AVR32_FT_UNKNOWN)
1740 + {
1741 + *node = build_variant_type_copy (*node);
1742 + TREE_TYPE (*node) = build_type_attribute_variant
1743 + (TREE_TYPE (*node),
1744 + tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
1745 + *no_add_attrs = true;
1746 + }
1747 + else
1748 + {
1749 + /* Possibly pass this attribute on from the type to a decl. */
1750 + if (flags & ((int) ATTR_FLAG_DECL_NEXT
1751 + | (int) ATTR_FLAG_FUNCTION_NEXT
1752 + | (int) ATTR_FLAG_ARRAY_NEXT))
1753 + {
1754 + *no_add_attrs = true;
1755 + return tree_cons (name, args, NULL_TREE);
1756 + }
1757 + else
1758 + {
1759 + warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
1760 + }
1761 + }
1762 + }
1763 +
1764 + return NULL_TREE;
1765 +}
1766 +
1767 +/* Handle an attribute requiring a FUNCTION_DECL;
1768 + arguments as in struct attribute_spec.handler. */
1769 +static tree
1770 +avr32_handle_fndecl_attribute (tree * node, tree name,
1771 + tree args ATTRIBUTE_UNUSED,
1772 + int flags ATTRIBUTE_UNUSED,
1773 + bool * no_add_attrs)
1774 +{
1775 + if (TREE_CODE (*node) != FUNCTION_DECL)
1776 + {
1777 + warning ("%qs attribute only applies to functions",
1778 + IDENTIFIER_POINTER (name));
1779 + *no_add_attrs = true;
1780 + }
1781 +
1782 + return NULL_TREE;
1783 +}
1784 +
1785 +
1786 +/* Handle an acall attribute;
1787 + arguments as in struct attribute_spec.handler. */
1788 +
1789 +static tree
1790 +avr32_handle_acall_attribute (tree * node, tree name,
1791 + tree args ATTRIBUTE_UNUSED,
1792 + int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
1793 +{
1794 + if (TREE_CODE (*node) == FUNCTION_TYPE || TREE_CODE (*node) == METHOD_TYPE)
1795 + {
1796 + warning ("`%s' attribute not yet supported...",
1797 + IDENTIFIER_POINTER (name));
1798 + *no_add_attrs = true;
1799 + return NULL_TREE;
1800 + }
1801 +
1802 + warning ("`%s' attribute only applies to functions",
1803 + IDENTIFIER_POINTER (name));
1804 + *no_add_attrs = true;
1805 + return NULL_TREE;
1806 +}
1807 +
1808 +
1809 +/* Return 0 if the attributes for two types are incompatible, 1 if they
1810 + are compatible, and 2 if they are nearly compatible (which causes a
1811 + warning to be generated). */
1812 +
1813 +static int
1814 +avr32_comp_type_attributes (tree type1, tree type2)
1815 +{
1816 + int acall1, acall2, isr1, isr2, naked1, naked2;
1817 +
1818 + /* Check for mismatch of non-default calling convention. */
1819 + if (TREE_CODE (type1) != FUNCTION_TYPE)
1820 + return 1;
1821 +
1822 + /* Check for mismatched call attributes. */
1823 + acall1 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type1)) != NULL;
1824 + acall2 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type2)) != NULL;
1825 + naked1 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type1)) != NULL;
1826 + naked2 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type2)) != NULL;
1827 + isr1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
1828 + if (!isr1)
1829 + isr1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
1830 +
1831 + isr2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
1832 + if (!isr2)
1833 + isr2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
1834 +
1835 + if ((acall1 && isr2)
1836 + || (acall2 && isr1) || (naked1 && isr2) || (naked2 && isr1))
1837 + return 0;
1838 +
1839 + return 1;
1840 +}
1841 +
1842 +
1843 +/* Computes the type of the current function. */
1844 +
1845 +static unsigned long
1846 +avr32_compute_func_type (void)
1847 +{
1848 + unsigned long type = AVR32_FT_UNKNOWN;
1849 + tree a;
1850 + tree attr;
1851 +
1852 + if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
1853 + abort ();
1854 +
1855 + /* Decide if the current function is volatile. Such functions never
1856 + return, and many memory cycles can be saved by not storing register
1857 + values that will never be needed again. This optimization was added to
1858 + speed up context switching in a kernel application. */
1859 + if (optimize > 0
1860 + && TREE_NOTHROW (current_function_decl)
1861 + && TREE_THIS_VOLATILE (current_function_decl))
1862 + type |= AVR32_FT_VOLATILE;
1863 +
1864 + if (cfun->static_chain_decl != NULL)
1865 + type |= AVR32_FT_NESTED;
1866 +
1867 + attr = DECL_ATTRIBUTES (current_function_decl);
1868 +
1869 + a = lookup_attribute ("isr", attr);
1870 + if (a == NULL_TREE)
1871 + a = lookup_attribute ("interrupt", attr);
1872 +
1873 + if (a == NULL_TREE)
1874 + type |= AVR32_FT_NORMAL;
1875 + else
1876 + type |= avr32_isr_value (TREE_VALUE (a));
1877 +
1878 +
1879 + a = lookup_attribute ("acall", attr);
1880 + if (a != NULL_TREE)
1881 + type |= AVR32_FT_ACALL;
1882 +
1883 + a = lookup_attribute ("naked", attr);
1884 + if (a != NULL_TREE)
1885 + type |= AVR32_FT_NAKED;
1886 +
1887 + return type;
1888 +}
1889 +
1890 +/* Returns the type of the current function. */
1891 +
1892 +static unsigned long
1893 +avr32_current_func_type (void)
1894 +{
1895 + if (AVR32_FUNC_TYPE (cfun->machine->func_type) == AVR32_FT_UNKNOWN)
1896 + cfun->machine->func_type = avr32_compute_func_type ();
1897 +
1898 + return cfun->machine->func_type;
1899 +}
1900 +
1901 +/*
1902 + This target hook should return true if we should not pass type solely
1903 + in registers. The file expr.h provides a default definition that is usually appropriate;
1904 + refer to expr.h for additional documentation.
1905 +*/
1906 +bool
1907 +avr32_must_pass_in_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type)
1908 +{
1909 + if (type && AGGREGATE_TYPE_P (type)
1910 + /* If the alignment is less than the size then pass in the struct on
1911 + the stack. */
1912 + && ((unsigned int) TYPE_ALIGN_UNIT (type) <
1913 + (unsigned int) int_size_in_bytes (type))
1914 + /* If we support unaligned word accesses then structs of size 4 and 8
1915 + can have any alignment and still be passed in registers. */
1916 + && !(TARGET_UNALIGNED_WORD
1917 + && (int_size_in_bytes (type) == 4
1918 + || int_size_in_bytes (type) == 8))
1919 + /* Double word structs need only a word alignment. */
1920 + && !(int_size_in_bytes (type) == 8 && TYPE_ALIGN_UNIT (type) >= 4))
1921 + return true;
1922 +
1923 + if (type && AGGREGATE_TYPE_P (type)
1924 + /* Structs of size 3,5,6,7 are always passed in registers. */
1925 + && (int_size_in_bytes (type) == 3
1926 + || int_size_in_bytes (type) == 5
1927 + || int_size_in_bytes (type) == 6 || int_size_in_bytes (type) == 7))
1928 + return true;
1929 +
1930 +
1931 + return (type && TREE_ADDRESSABLE (type));
1932 +}
1933 +
1934 +
1935 +bool
1936 +avr32_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
1937 +{
1938 + return true;
1939 +}
1940 +
1941 +/*
1942 + This target hook should return true if an argument at the position indicated
1943 + by cum should be passed by reference. This predicate is queried after target
1944 + independent reasons for being passed by reference, such as TREE_ADDRESSABLE (type).
1945 +
1946 + If the hook returns true, a copy of that argument is made in memory and a
1947 + pointer to the argument is passed instead of the argument itself. The pointer
1948 + is passed in whatever way is appropriate for passing a pointer to that type.
1949 +*/
1950 +bool
1951 +avr32_pass_by_reference (CUMULATIVE_ARGS * cum ATTRIBUTE_UNUSED,
1952 + enum machine_mode mode ATTRIBUTE_UNUSED,
1953 + tree type, bool named ATTRIBUTE_UNUSED)
1954 +{
1955 + return (type && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST));
1956 +}
1957 +
1958 +static int
1959 +avr32_arg_partial_bytes (CUMULATIVE_ARGS * pcum ATTRIBUTE_UNUSED,
1960 + enum machine_mode mode ATTRIBUTE_UNUSED,
1961 + tree type ATTRIBUTE_UNUSED,
1962 + bool named ATTRIBUTE_UNUSED)
1963 +{
1964 + return 0;
1965 +}
1966 +
1967 +
1968 +struct gcc_target targetm = TARGET_INITIALIZER;
1969 +
1970 +/*
1971 + Table used to convert from register number in the assembler instructions and
1972 + the register numbers used in gcc.
1973 +*/
1974 +const int avr32_function_arg_reglist[] =
1975 +{
1976 + INTERNAL_REGNUM (12),
1977 + INTERNAL_REGNUM (11),
1978 + INTERNAL_REGNUM (10),
1979 + INTERNAL_REGNUM (9),
1980 + INTERNAL_REGNUM (8)
1981 +};
1982 +
1983 +rtx avr32_compare_op0 = NULL_RTX;
1984 +rtx avr32_compare_op1 = NULL_RTX;
1985 +rtx avr32_compare_operator = NULL_RTX;
1986 +rtx avr32_acc_cache = NULL_RTX;
1987 +
1988 +/*
1989 + Returns nonzero if it is allowed to store a value of mode mode in hard
1990 + register number regno.
1991 +*/
1992 +int
1993 +avr32_hard_regno_mode_ok (int regnr, enum machine_mode mode)
1994 +{
1995 + /* We allow only float modes in the fp-registers */
1996 + if (regnr >= FIRST_FP_REGNUM
1997 + && regnr <= LAST_FP_REGNUM && GET_MODE_CLASS (mode) != MODE_FLOAT)
1998 + {
1999 + return 0;
2000 + }
2001 +
2002 + switch (mode)
2003 + {
2004 + case DImode: /* long long */
2005 + case DFmode: /* double */
2006 + case SCmode: /* __complex__ float */
2007 + case CSImode: /* __complex__ int */
2008 + if (regnr < 4)
2009 + { /* long long int not supported in r12, sp, lr
2010 + or pc. */
2011 + return 0;
2012 + }
2013 + else
2014 + {
2015 +	  if (regnr % 2)	/* long long int has to be referred to in even
2016 + registers. */
2017 + return 0;
2018 + else
2019 + return 1;
2020 + }
2021 + case CDImode: /* __complex__ long long */
2022 + case DCmode: /* __complex__ double */
2023 + case TImode: /* 16 bytes */
2024 + if (regnr < 7)
2025 + return 0;
2026 + else if (regnr % 2)
2027 + return 0;
2028 + else
2029 + return 1;
2030 + default:
2031 + return 1;
2032 + }
2033 +}
2034 +
2035 +
2036 +int
2037 +avr32_rnd_operands (rtx add, rtx shift)
2038 +{
2039 + if (GET_CODE (shift) == CONST_INT &&
2040 + GET_CODE (add) == CONST_INT && INTVAL (shift) > 0)
2041 + {
2042 + if ((1 << (INTVAL (shift) - 1)) == INTVAL (add))
2043 + return TRUE;
2044 + }
2045 +
2046 + return FALSE;
2047 +}
2048 +
2049 +
2050 +
2051 +int
2052 +avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c, const char *str)
2053 +{
2054 + switch (c)
2055 + {
2056 + case 'K':
2057 + case 'I':
2058 + {
2059 + HOST_WIDE_INT min_value = 0, max_value = 0;
2060 + char size_str[3];
2061 + int const_size;
2062 +
2063 + size_str[0] = str[2];
2064 + size_str[1] = str[3];
2065 + size_str[2] = '\0';
2066 + const_size = atoi (size_str);
2067 +
2068 + if (toupper (str[1]) == 'U')
2069 + {
2070 + min_value = 0;
2071 + max_value = (1 << const_size) - 1;
2072 + }
2073 + else if (toupper (str[1]) == 'S')
2074 + {
2075 + min_value = -(1 << (const_size - 1));
2076 + max_value = (1 << (const_size - 1)) - 1;
2077 + }
2078 +
2079 + if (c == 'I')
2080 + {
2081 + value = -value;
2082 + }
2083 +
2084 + if (value >= min_value && value <= max_value)
2085 + {
2086 + return 1;
2087 + }
2088 + break;
2089 + }
2090 + case 'M':
2091 + return avr32_mask_upper_bits_operand (GEN_INT (value), VOIDmode);
2092 + }
2093 +
2094 + return 0;
2095 +}
2096 +
2097 +
2098 +/* Compute the mask of floating-point registers which need saving upon
2099 +   entry to this function. */
2100 +static unsigned long
2101 +avr32_compute_save_fp_reg_mask (void)
2102 +{
2103 + unsigned long func_type = avr32_current_func_type ();
2104 + unsigned int save_reg_mask = 0;
2105 + unsigned int reg;
2106 + unsigned int max_reg = 7;
2107 + int save_all_call_used_regs = FALSE;
2108 +
2109 + /* This only applies for hardware floating-point implementation. */
2110 + if (!TARGET_HARD_FLOAT)
2111 + return 0;
2112 +
2113 + if (IS_INTERRUPT (func_type))
2114 + {
2115 +
2116 + /* Interrupt functions must not corrupt any registers, even call
2117 + clobbered ones. If this is a leaf function we can just examine the
2118 + registers used by the RTL, but otherwise we have to assume that
2119 + whatever function is called might clobber anything, and so we have
2120 + to save all the call-clobbered registers as well. */
2121 + max_reg = 13;
2122 + save_all_call_used_regs = !current_function_is_leaf;
2123 + }
2124 +
2125 +  /* All registers used must be saved */
2126 + for (reg = 0; reg <= max_reg; reg++)
2127 + if (regs_ever_live[INTERNAL_FP_REGNUM (reg)]
2128 + || (save_all_call_used_regs
2129 + && call_used_regs[INTERNAL_FP_REGNUM (reg)]))
2130 + save_reg_mask |= (1 << reg);
2131 +
2132 + return save_reg_mask;
2133 +}
2134 +
2135 +/* Compute the mask of registers which need saving upon function entry. */
2136 +static unsigned long
2137 +avr32_compute_save_reg_mask (int push)
2138 +{
2139 + unsigned long func_type;
2140 + unsigned int save_reg_mask = 0;
2141 + unsigned int reg;
2142 +
2143 + func_type = avr32_current_func_type ();
2144 +
2145 + if (IS_INTERRUPT (func_type))
2146 + {
2147 + unsigned int max_reg = 12;
2148 +
2149 +
2150 + /* Get the banking scheme for the interrupt */
2151 + switch (func_type)
2152 + {
2153 + case AVR32_FT_ISR_FULL:
2154 + max_reg = 0;
2155 + break;
2156 + case AVR32_FT_ISR_HALF:
2157 + max_reg = 7;
2158 + break;
2159 + case AVR32_FT_ISR_NONE:
2160 + max_reg = 12;
2161 + break;
2162 + }
2163 +
2164 + /* Interrupt functions must not corrupt any registers, even call
2165 + clobbered ones. If this is a leaf function we can just examine the
2166 + registers used by the RTL, but otherwise we have to assume that
2167 + whatever function is called might clobber anything, and so we have
2168 + to save all the call-clobbered registers as well. */
2169 +
2170 + /* Need not push the registers r8-r12 for AVR32A architectures, as this
2171 +         is automatically done in hardware. We also do not have any shadow
2172 + registers. */
2173 + if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A)
2174 + {
2175 + max_reg = 7;
2176 + func_type = AVR32_FT_ISR_NONE;
2177 + }
2178 +
2179 +      /* All registers which are used and are not shadowed must be saved */
2180 + for (reg = 0; reg <= max_reg; reg++)
2181 + if (regs_ever_live[INTERNAL_REGNUM (reg)]
2182 + || (!current_function_is_leaf
2183 + && call_used_regs[INTERNAL_REGNUM (reg)]))
2184 + save_reg_mask |= (1 << reg);
2185 +
2186 +      /* Check LR.  It is only saved here for non-shadowed
2187 +         register models. */
2188 +      if ((regs_ever_live[LR_REGNUM]
2189 +           || !current_function_is_leaf
2190 +           || frame_pointer_needed)
2191 +          && (func_type == AVR32_FT_ISR_NONE))
2192 + save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
2193 +
2194 + /* Make sure that the GOT register is pushed. */
2195 + if (max_reg >= ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM)
2196 + && current_function_uses_pic_offset_table)
2197 + save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
2198 +
2199 + }
2200 + else
2201 + {
2202 + int use_pushm = optimize_size;
2203 +
2204 + /* In the normal case we only need to save those registers which are
2205 + call saved and which are used by this function. */
2206 + for (reg = 0; reg <= 7; reg++)
2207 + if (regs_ever_live[INTERNAL_REGNUM (reg)]
2208 + && !call_used_regs[INTERNAL_REGNUM (reg)])
2209 + save_reg_mask |= (1 << reg);
2210 +
2211 + /* Make sure that the GOT register is pushed. */
2212 + if (current_function_uses_pic_offset_table)
2213 + save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
2214 +
2215 +
2216 +      /* If we optimize for size and do not have anonymous arguments: always
2217 +         use pushm/popm. */
2218 + if (use_pushm)
2219 + {
2220 + if ((save_reg_mask & (1 << 0))
2221 + || (save_reg_mask & (1 << 1))
2222 + || (save_reg_mask & (1 << 2)) || (save_reg_mask & (1 << 3)))
2223 + save_reg_mask |= 0xf;
2224 +
2225 + if ((save_reg_mask & (1 << 4))
2226 + || (save_reg_mask & (1 << 5))
2227 + || (save_reg_mask & (1 << 6)) || (save_reg_mask & (1 << 7)))
2228 + save_reg_mask |= 0xf0;
2229 +
2230 + if ((save_reg_mask & (1 << 8)) || (save_reg_mask & (1 << 9)))
2231 + save_reg_mask |= 0x300;
2232 + }
2233 +
2234 +
2235 + /* Check LR */
2236 + if ((regs_ever_live[LR_REGNUM] || !current_function_is_leaf ||
2237 + (optimize_size && save_reg_mask) || frame_pointer_needed))
2238 + {
2239 + if (push)
2240 + {
2241 + /* Push/Pop LR */
2242 + save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
2243 + }
2244 + else
2245 + {
2246 + /* Pop PC */
2247 + save_reg_mask |= (1 << ASM_REGNUM (PC_REGNUM));
2248 + }
2249 + }
2250 + }
2251 +
2252 + return save_reg_mask;
2253 +}
2254 +
2255 +/* Compute the total size in bytes of all saved registers. */
2256 +static int
2257 +avr32_get_reg_mask_size (int reg_mask)
2258 +{
2259 + int reg, size;
2260 + size = 0;
2261 +
2262 + for (reg = 0; reg <= 15; reg++)
2263 + if (reg_mask & (1 << reg))
2264 + size += 4;
2265 +
2266 + return size;
2267 +}
2268 +
2269 +/* Get a register from one of the registers which are saved onto the stack
2270 +   upon function entry. */
2271 +
2272 +static int
2273 +avr32_get_saved_reg (int save_reg_mask)
2274 +{
2275 + unsigned int reg;
2276 +
2277 + /* Find the first register which is saved in the saved_reg_mask */
2278 + for (reg = 0; reg <= 15; reg++)
2279 + if (save_reg_mask & (1 << reg))
2280 + return reg;
2281 +
2282 + return -1;
2283 +}
2284 +
2285 +/* Return 1 if it is possible to return using a single instruction. */
2286 +int
2287 +avr32_use_return_insn (int iscond)
2288 +{
2289 + unsigned int func_type = avr32_current_func_type ();
2290 + unsigned long saved_int_regs;
2291 + unsigned long saved_fp_regs;
2292 +
2293 + /* Never use a return instruction before reload has run. */
2294 + if (!reload_completed)
2295 + return 0;
2296 +
2297 + /* Must adjust the stack for vararg functions. */
2298 + if (current_function_args_info.uses_anonymous_args)
2299 + return 0;
2300 +
2301 +  /* If there is a stack adjustment. */
2302 + if (get_frame_size ())
2303 + return 0;
2304 +
2305 + saved_int_regs = avr32_compute_save_reg_mask (TRUE);
2306 + saved_fp_regs = avr32_compute_save_fp_reg_mask ();
2307 +
2308 +  /* Functions which have saved fp-regs on the stack cannot return in
2309 +     one instruction. */
2310 + if (saved_fp_regs)
2311 + return 0;
2312 +
2313 + /* Conditional returns can not be performed in one instruction if we need
2314 + to restore registers from the stack */
2315 + if (iscond && saved_int_regs)
2316 + return 0;
2317 +
2318 + /* Conditional return can not be used for interrupt handlers. */
2319 + if (iscond && IS_INTERRUPT (func_type))
2320 + return 0;
2321 +
2322 +  /* For interrupt handlers which need to pop registers */
2323 + if (saved_int_regs && IS_INTERRUPT (func_type))
2324 + return 0;
2325 +
2326 +
2327 + /* If there are saved registers but the LR isn't saved, then we need two
2328 + instructions for the return. */
2329 + if (saved_int_regs && !(saved_int_regs & (1 << ASM_REGNUM (LR_REGNUM))))
2330 + return 0;
2331 +
2332 +
2333 + return 1;
2334 +}
2335 +
2336 +
2337 +/* Generate some function prologue info in the assembly file. */
2338 +
2339 +void
2340 +avr32_target_asm_function_prologue (FILE * f, HOST_WIDE_INT frame_size)
2341 +{
2342 + if (IS_NAKED (avr32_current_func_type ()))
2343 + fprintf (f,
2344 + "\t# Function is naked: Prologue and epilogue provided by programmer\n");
2345 +
2346 + if (IS_INTERRUPT (avr32_current_func_type ()))
2347 + {
2348 + switch (avr32_current_func_type ())
2349 + {
2350 + case AVR32_FT_ISR_FULL:
2351 + fprintf (f,
2352 + "\t# Interrupt Function: Fully shadowed register file\n");
2353 + break;
2354 + case AVR32_FT_ISR_HALF:
2355 + fprintf (f,
2356 + "\t# Interrupt Function: Half shadowed register file\n");
2357 + break;
2358 + default:
2359 + case AVR32_FT_ISR_NONE:
2360 + fprintf (f, "\t# Interrupt Function: No shadowed register file\n");
2361 + break;
2362 + }
2363 + }
2364 +
2365 +
2366 + fprintf (f, "\t# args = %i, frame = %li, pretend = %i\n",
2367 + current_function_args_size, frame_size,
2368 + current_function_pretend_args_size);
2369 +
2370 + fprintf (f, "\t# frame_needed = %i, leaf_function = %i\n",
2371 + frame_pointer_needed, current_function_is_leaf);
2372 +
2373 + fprintf (f, "\t# uses_anonymous_args = %i\n",
2374 + current_function_args_info.uses_anonymous_args);
2375 +}
2376 +
2377 +
2378 +/* Generate and emit an insn that we will recognize as a pushm or stm.
2379 + Unfortunately, since this insn does not reflect very well the actual
2380 + semantics of the operation, we need to annotate the insn for the benefit
2381 + of DWARF2 frame unwind information. */
2382 +
2383 +int avr32_convert_to_reglist16 (int reglist8_vect);
2384 +
2385 +static rtx
2386 +emit_multi_reg_push (int reglist, int usePUSHM)
2387 +{
2388 + rtx insn;
2389 + rtx dwarf;
2390 + rtx tmp;
2391 + rtx reg;
2392 + int i;
2393 + int nr_regs;
2394 + int index = 0;
2395 +
2396 + if (usePUSHM)
2397 + {
2398 + insn = emit_insn (gen_pushm (gen_rtx_CONST_INT (SImode, reglist)));
2399 + reglist = avr32_convert_to_reglist16 (reglist);
2400 + }
2401 + else
2402 + {
2403 + insn = emit_insn (gen_stm (stack_pointer_rtx,
2404 + gen_rtx_CONST_INT (SImode, reglist),
2405 + gen_rtx_CONST_INT (SImode, 1)));
2406 + }
2407 +
2408 + nr_regs = avr32_get_reg_mask_size (reglist) / 4;
2409 + dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
2410 +
2411 + for (i = 15; i >= 0; i--)
2412 + {
2413 + if (reglist & (1 << i))
2414 + {
2415 + reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (i));
2416 + tmp = gen_rtx_SET (VOIDmode,
2417 + gen_rtx_MEM (SImode,
2418 + plus_constant (stack_pointer_rtx,
2419 + 4 * index)), reg);
2420 + RTX_FRAME_RELATED_P (tmp) = 1;
2421 + XVECEXP (dwarf, 0, 1 + index++) = tmp;
2422 + }
2423 + }
2424 +
2425 + tmp = gen_rtx_SET (SImode,
2426 + stack_pointer_rtx,
2427 + gen_rtx_PLUS (SImode,
2428 + stack_pointer_rtx,
2429 + GEN_INT (-4 * nr_regs)));
2430 + RTX_FRAME_RELATED_P (tmp) = 1;
2431 + XVECEXP (dwarf, 0, 0) = tmp;
2432 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
2433 + REG_NOTES (insn));
2434 + return insn;
2435 +}
2436 +
2437 +
2438 +static rtx
2439 +emit_multi_fp_reg_push (int reglist)
2440 +{
2441 + rtx insn;
2442 + rtx dwarf;
2443 + rtx tmp;
2444 + rtx reg;
2445 + int i;
2446 + int nr_regs;
2447 + int index = 0;
2448 +
2449 + insn = emit_insn (gen_stm_fp (stack_pointer_rtx,
2450 + gen_rtx_CONST_INT (SImode, reglist),
2451 + gen_rtx_CONST_INT (SImode, 1)));
2452 +
2453 + nr_regs = avr32_get_reg_mask_size (reglist) / 4;
2454 + dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
2455 +
2456 + for (i = 15; i >= 0; i--)
2457 + {
2458 + if (reglist & (1 << i))
2459 + {
2460 + reg = gen_rtx_REG (SImode, INTERNAL_FP_REGNUM (i));
2461 + tmp = gen_rtx_SET (VOIDmode,
2462 + gen_rtx_MEM (SImode,
2463 + plus_constant (stack_pointer_rtx,
2464 + 4 * index)), reg);
2465 + RTX_FRAME_RELATED_P (tmp) = 1;
2466 + XVECEXP (dwarf, 0, 1 + index++) = tmp;
2467 + }
2468 + }
2469 +
2470 + tmp = gen_rtx_SET (SImode,
2471 + stack_pointer_rtx,
2472 + gen_rtx_PLUS (SImode,
2473 + stack_pointer_rtx,
2474 + GEN_INT (-4 * nr_regs)));
2475 + RTX_FRAME_RELATED_P (tmp) = 1;
2476 + XVECEXP (dwarf, 0, 0) = tmp;
2477 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
2478 + REG_NOTES (insn));
2479 + return insn;
2480 +}
2481 +
2482 +rtx
2483 +avr32_gen_load_multiple (rtx * regs, int count, rtx from,
2484 + int write_back, int in_struct_p, int scalar_p)
2485 +{
2486 +
2487 + rtx result;
2488 + int i = 0, j;
2489 +
2490 + result =
2491 + gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + (write_back ? 1 : 0)));
2492 +
2493 + if (write_back)
2494 + {
2495 + XVECEXP (result, 0, 0)
2496 + = gen_rtx_SET (GET_MODE (from), from,
2497 + plus_constant (from, count * 4));
2498 + i = 1;
2499 + count++;
2500 + }
2501 +
2502 +
2503 + for (j = 0; i < count; i++, j++)
2504 + {
2505 + rtx unspec;
2506 + rtx mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4));
2507 + MEM_IN_STRUCT_P (mem) = in_struct_p;
2508 + MEM_SCALAR_P (mem) = scalar_p;
2509 + unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, mem), UNSPEC_LDM);
2510 + XVECEXP (result, 0, i) = gen_rtx_SET (VOIDmode, regs[j], unspec);
2511 + }
2512 +
2513 + return result;
2514 +}
2515 +
2516 +
2517 +rtx
2518 +avr32_gen_store_multiple (rtx * regs, int count, rtx to,
2519 + int in_struct_p, int scalar_p)
2520 +{
2521 + rtx result;
2522 + int i = 0, j;
2523 +
2524 + result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
2525 +
2526 + for (j = 0; i < count; i++, j++)
2527 + {
2528 + rtx mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4));
2529 + MEM_IN_STRUCT_P (mem) = in_struct_p;
2530 + MEM_SCALAR_P (mem) = scalar_p;
2531 + XVECEXP (result, 0, i)
2532 + = gen_rtx_SET (VOIDmode, mem,
2533 + gen_rtx_UNSPEC (VOIDmode,
2534 + gen_rtvec (1, regs[j]),
2535 + UNSPEC_STORE_MULTIPLE));
2536 + }
2537 +
2538 + return result;
2539 +}
2540 +
2541 +
2542 +/* Move a block of memory if it is word aligned or we support unaligned
2543 +   word memory accesses.  The size must be at most 64 bytes. */
2544 +
2545 +int
2546 +avr32_gen_movmemsi (rtx * operands)
2547 +{
2548 + HOST_WIDE_INT bytes_to_go;
2549 + rtx src, dst;
2550 + rtx st_src, st_dst;
2551 + int ptr_offset = 0;
2552 + int block_size;
2553 + int dst_in_struct_p, src_in_struct_p;
2554 + int dst_scalar_p, src_scalar_p;
2555 + int unaligned;
2556 +
2557 + if (GET_CODE (operands[2]) != CONST_INT
2558 + || GET_CODE (operands[3]) != CONST_INT
2559 + || INTVAL (operands[2]) > 64
2560 + || ((INTVAL (operands[3]) & 3) && !TARGET_UNALIGNED_WORD))
2561 + return 0;
2562 +
2563 + unaligned = (INTVAL (operands[3]) & 3) != 0;
2564 +
2565 + block_size = 4;
2566 +
2567 + st_dst = XEXP (operands[0], 0);
2568 + st_src = XEXP (operands[1], 0);
2569 +
2570 + dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
2571 + dst_scalar_p = MEM_SCALAR_P (operands[0]);
2572 + src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
2573 + src_scalar_p = MEM_SCALAR_P (operands[1]);
2574 +
2575 + dst = copy_to_mode_reg (SImode, st_dst);
2576 + src = copy_to_mode_reg (SImode, st_src);
2577 +
2578 + bytes_to_go = INTVAL (operands[2]);
2579 +
2580 + while (bytes_to_go)
2581 + {
2582 + enum machine_mode move_mode;
2583 + /* Seems to be a problem with reloads for the movti pattern so this is
2584 + disabled until that problem is resolved */
2585 +
2586 + /* if ( bytes_to_go >= GET_MODE_SIZE(TImode) ) move_mode = TImode; else
2587 + */
2588 + if ((bytes_to_go >= GET_MODE_SIZE (DImode)) && !unaligned)
2589 + move_mode = DImode;
2590 + else if (bytes_to_go >= GET_MODE_SIZE (SImode))
2591 + move_mode = SImode;
2592 + else
2593 + move_mode = QImode;
2594 +
2595 + {
2596 + rtx dst_mem = gen_rtx_MEM (move_mode,
2597 + gen_rtx_PLUS (SImode, dst,
2598 + GEN_INT (ptr_offset)));
2599 + rtx src_mem = gen_rtx_MEM (move_mode,
2600 + gen_rtx_PLUS (SImode, src,
2601 + GEN_INT (ptr_offset)));
2602 + ptr_offset += GET_MODE_SIZE (move_mode);
2603 + bytes_to_go -= GET_MODE_SIZE (move_mode);
2604 +
2605 + MEM_IN_STRUCT_P (dst_mem) = dst_in_struct_p;
2606 + MEM_SCALAR_P (dst_mem) = dst_scalar_p;
2607 +
2608 + MEM_IN_STRUCT_P (src_mem) = src_in_struct_p;
2609 + MEM_SCALAR_P (src_mem) = src_scalar_p;
2610 + emit_move_insn (dst_mem, src_mem);
2611 +
2612 + }
2613 + }
2614 +
2615 + return 1;
2616 +}
2617 +
2618 +
2619 +
2620 +/* Expand the prologue instructions. */
2621 +void
2622 +avr32_expand_prologue (void)
2623 +{
2624 + rtx insn, dwarf;
2625 + unsigned long saved_reg_mask, saved_fp_reg_mask;
2626 + int reglist8 = 0;
2627 +
2628 +  /* Naked functions do not have a prologue. */
2629 + if (IS_NAKED (avr32_current_func_type ()))
2630 + return;
2631 +
2632 + saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
2633 +
2634 + if (saved_reg_mask)
2635 + {
2636 + /* Must push used registers */
2637 +
2638 +      /* Should we use PUSHM or STM? */
2639 + int usePUSHM = TRUE;
2640 + reglist8 = 0;
2641 + if (((saved_reg_mask & (1 << 0)) ||
2642 + (saved_reg_mask & (1 << 1)) ||
2643 + (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
2644 + {
2645 + /* One of R0-R3 should at least be pushed */
2646 + if (((saved_reg_mask & (1 << 0)) &&
2647 + (saved_reg_mask & (1 << 1)) &&
2648 + (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
2649 + {
2650 + /* All should be pushed */
2651 + reglist8 |= 0x01;
2652 + }
2653 + else
2654 + {
2655 + usePUSHM = FALSE;
2656 + }
2657 + }
2658 +
2659 + if (((saved_reg_mask & (1 << 4)) ||
2660 + (saved_reg_mask & (1 << 5)) ||
2661 + (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
2662 + {
2663 + /* One of R4-R7 should at least be pushed */
2664 + if (((saved_reg_mask & (1 << 4)) &&
2665 + (saved_reg_mask & (1 << 5)) &&
2666 + (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
2667 + {
2668 + if (usePUSHM)
2669 + /* All should be pushed */
2670 + reglist8 |= 0x02;
2671 + }
2672 + else
2673 + {
2674 + usePUSHM = FALSE;
2675 + }
2676 + }
2677 +
2678 + if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
2679 + {
2680 + /* One of R8-R9 should at least be pushed */
2681 + if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
2682 + {
2683 + if (usePUSHM)
2684 + /* All should be pushed */
2685 + reglist8 |= 0x04;
2686 + }
2687 + else
2688 + {
2689 + usePUSHM = FALSE;
2690 + }
2691 + }
2692 +
2693 + if (saved_reg_mask & (1 << 10))
2694 + reglist8 |= 0x08;
2695 +
2696 + if (saved_reg_mask & (1 << 11))
2697 + reglist8 |= 0x10;
2698 +
2699 + if (saved_reg_mask & (1 << 12))
2700 + reglist8 |= 0x20;
2701 +
2702 + if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
2703 + {
2704 + /* Push LR */
2705 + reglist8 |= 0x40;
2706 + }
2707 +
2708 + if (usePUSHM)
2709 + {
2710 + insn = emit_multi_reg_push (reglist8, TRUE);
2711 + }
2712 + else
2713 + {
2714 + insn = emit_multi_reg_push (saved_reg_mask, FALSE);
2715 + }
2716 + RTX_FRAME_RELATED_P (insn) = 1;
2717 +
2718 + /* Prevent this instruction from being scheduled after any other
2719 + instructions. */
2720 + emit_insn (gen_blockage ());
2721 + }
2722 +
2723 + saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
2724 + if (saved_fp_reg_mask)
2725 + {
2726 + insn = emit_multi_fp_reg_push (saved_fp_reg_mask);
2727 + RTX_FRAME_RELATED_P (insn) = 1;
2728 +
2729 + /* Prevent this instruction from being scheduled after any other
2730 + instructions. */
2731 + emit_insn (gen_blockage ());
2732 + }
2733 +
2734 + /* Set frame pointer */
2735 + if (frame_pointer_needed)
2736 + {
2737 + insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
2738 + RTX_FRAME_RELATED_P (insn) = 1;
2739 + }
2740 +
2741 + if (get_frame_size () > 0)
2742 + {
2743 + if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks21"))
2744 + {
2745 + insn = emit_insn (gen_rtx_SET (SImode,
2746 + stack_pointer_rtx,
2747 + gen_rtx_PLUS (SImode,
2748 + stack_pointer_rtx,
2749 + gen_rtx_CONST_INT
2750 + (SImode,
2751 + -get_frame_size
2752 + ()))));
2753 + RTX_FRAME_RELATED_P (insn) = 1;
2754 + }
2755 + else
2756 + {
2757 +	  /* The immediate is larger than Ks21.  We must either check if we can use
2758 +	     one of the pushed registers as temporary storage or we must
2759 +	     obtain a temp register by pushing a register to the stack. */
2760 + rtx temp_reg, const_pool_entry, insn;
2761 + if (saved_reg_mask)
2762 + {
2763 + temp_reg =
2764 + gen_rtx_REG (SImode,
2765 + INTERNAL_REGNUM (avr32_get_saved_reg
2766 + (saved_reg_mask)));
2767 + }
2768 + else
2769 + {
2770 + temp_reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (7));
2771 + emit_move_insn (gen_rtx_MEM
2772 + (SImode,
2773 + gen_rtx_PRE_DEC (SImode, stack_pointer_rtx)),
2774 + temp_reg);
2775 + }
2776 +
2777 + const_pool_entry =
2778 + force_const_mem (SImode,
2779 + gen_rtx_CONST_INT (SImode, get_frame_size ()));
2780 + emit_move_insn (temp_reg, const_pool_entry);
2781 +
2782 + insn = emit_insn (gen_rtx_SET (SImode,
2783 + stack_pointer_rtx,
2784 + gen_rtx_MINUS (SImode,
2785 + stack_pointer_rtx,
2786 + temp_reg)));
2787 +
2788 + dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
2789 + gen_rtx_PLUS (SImode, stack_pointer_rtx,
2790 + GEN_INT (-get_frame_size ())));
2791 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2792 + dwarf, REG_NOTES (insn));
2793 + RTX_FRAME_RELATED_P (insn) = 1;
2794 +
2795 + if (!saved_reg_mask)
2796 + {
2797 + insn =
2798 + emit_move_insn (temp_reg,
2799 + gen_rtx_MEM (SImode,
2800 + gen_rtx_POST_INC (SImode,
2801 + gen_rtx_REG
2802 + (SImode,
2803 + 13))));
2804 + }
2805 +
2806 + /* Mark the temp register as dead */
2807 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, temp_reg,
2808 + REG_NOTES (insn));
2809 +
2810 +
2811 + }
2812 +
2813 +      /* Prevent the stack adjustment from being scheduled after any
2814 + instructions using the frame pointer. */
2815 + emit_insn (gen_blockage ());
2816 + }
2817 +
2818 + /* Load GOT */
2819 + if (flag_pic)
2820 + {
2821 + avr32_load_pic_register ();
2822 +
2823 + /* gcc does not know that load or call instructions might use the pic
2824 + register so it might schedule these instructions before the loading
2825 + of the pic register. To avoid this emit a barrier for now. TODO!
2826 + Find out a better way to let gcc know which instructions might use
2827 + the pic register. */
2828 + emit_insn (gen_blockage ());
2829 + }
2830 + return;
2831 +}
2832 +
2833 +void
2834 +avr32_set_return_address (rtx source)
2835 +{
2836 + rtx addr;
2837 + unsigned long saved_regs;
2838 +
2839 + saved_regs = avr32_compute_save_reg_mask (TRUE);
2840 +
2841 + if (!(saved_regs & (1 << ASM_REGNUM (LR_REGNUM))))
2842 + emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
2843 + else
2844 + {
2845 + if (frame_pointer_needed)
2846 + addr = gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM);
2847 + else
2848 + /* FIXME: Need to use scratch register if frame is large */
2849 + addr = plus_constant (stack_pointer_rtx, get_frame_size ());
2850 +
2851 + emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
2852 + }
2853 +}
2854 +
2855 +
2856 +
2857 +/* Return the length of INSN. LENGTH is the initial length computed by
2858 + attributes in the machine-description file. */
2859 +
2860 +int
2861 +avr32_adjust_insn_length (rtx insn ATTRIBUTE_UNUSED,
2862 + int length ATTRIBUTE_UNUSED)
2863 +{
2864 + return length;
2865 +}
2866 +
2867 +void
2868 +avr32_output_return_instruction (int single_ret_inst ATTRIBUTE_UNUSED,
2869 + int iscond ATTRIBUTE_UNUSED,
2870 + rtx cond ATTRIBUTE_UNUSED, rtx r12_imm)
2871 +{
2872 +
2873 + unsigned long saved_reg_mask, saved_fp_reg_mask;
2874 + int insert_ret = TRUE;
2875 + int reglist8 = 0;
2876 + int stack_adjustment = get_frame_size ();
2877 + unsigned int func_type = avr32_current_func_type ();
2878 + FILE *f = asm_out_file;
2879 +
2880 +  /* Naked functions do not have an epilogue. */
2881 + if (IS_NAKED (func_type))
2882 + return;
2883 +
2884 + saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
2885 +
2886 + saved_reg_mask = avr32_compute_save_reg_mask (FALSE);
2887 +
2888 + /* Reset frame pointer */
2889 + if (stack_adjustment > 0)
2890 + {
2891 + if (avr32_const_ok_for_constraint_p (stack_adjustment, 'I', "Is21"))
2892 + {
2893 + fprintf (f, "\tsub sp, %i # Reset Frame Pointer\n",
2894 + -stack_adjustment);
2895 + }
2896 + else
2897 + {
2898 + /* TODO! Is it safe to use r8 as scratch?? */
2899 + fprintf (f, "\tmov r8, lo(%i) # Reset Frame Pointer\n",
2900 + -stack_adjustment);
2901 + fprintf (f, "\torh r8, hi(%i) # Reset Frame Pointer\n",
2902 + -stack_adjustment);
2903 + fprintf (f, "\tadd sp,r8 # Reset Frame Pointer\n");
2904 + }
2905 + }
2906 +
2907 + if (saved_fp_reg_mask)
2908 + {
2909 + char reglist[64]; /* 64 bytes should be enough... */
2910 + avr32_make_fp_reglist_w (saved_fp_reg_mask, (char *) reglist);
2911 + fprintf (f, "\tldcm.w\tcp0, sp++, %s\n", reglist);
2912 + if (saved_fp_reg_mask & ~0xff)
2913 + {
2914 + saved_fp_reg_mask &= ~0xff;
2915 + avr32_make_fp_reglist_d (saved_fp_reg_mask, (char *) reglist);
2916 + fprintf (f, "\tldcm.d\tcp0, sp++, %s\n", reglist);
2917 + }
2918 + }
2919 +
2920 + if (saved_reg_mask)
2921 + {
2922 + /* Must pop used registers */
2923 +
2924 + /* Should we use POPM or LDM? */
2925 + int usePOPM = TRUE;
2926 + if (((saved_reg_mask & (1 << 0)) ||
2927 + (saved_reg_mask & (1 << 1)) ||
2928 + (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
2929 + {
2930 + /* One of R0-R3 should at least be popped */
2931 + if (((saved_reg_mask & (1 << 0)) &&
2932 + (saved_reg_mask & (1 << 1)) &&
2933 + (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
2934 + {
2935 + /* All should be popped */
2936 + reglist8 |= 0x01;
2937 + }
2938 + else
2939 + {
2940 + usePOPM = FALSE;
2941 + }
2942 + }
2943 +
2944 + if (((saved_reg_mask & (1 << 4)) ||
2945 + (saved_reg_mask & (1 << 5)) ||
2946 + (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
2947 + {
2948 +	  /* At least one of R4-R7 should be popped */
2949 + if (((saved_reg_mask & (1 << 4)) &&
2950 + (saved_reg_mask & (1 << 5)) &&
2951 + (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
2952 + {
2953 + if (usePOPM)
2954 + /* All should be popped */
2955 + reglist8 |= 0x02;
2956 + }
2957 + else
2958 + {
2959 + usePOPM = FALSE;
2960 + }
2961 + }
2962 +
2963 + if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
2964 + {
2965 +	  /* At least one of R8-R9 should be popped */
2966 + if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
2967 + {
2968 + if (usePOPM)
2969 +	      /* All should be popped */
2970 + reglist8 |= 0x04;
2971 + }
2972 + else
2973 + {
2974 + usePOPM = FALSE;
2975 + }
2976 + }
2977 +
2978 + if (saved_reg_mask & (1 << 10))
2979 + reglist8 |= 0x08;
2980 +
2981 + if (saved_reg_mask & (1 << 11))
2982 + reglist8 |= 0x10;
2983 +
2984 + if (saved_reg_mask & (1 << 12))
2985 + reglist8 |= 0x20;
2986 +
2987 + if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
2988 + /* Pop LR */
2989 + reglist8 |= 0x40;
2990 +
2991 + if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
2992 + /* Pop LR into PC. */
2993 + reglist8 |= 0x80;
2994 +
2995 + if (usePOPM)
2996 + {
2997 + char reglist[64]; /* 64 bytes should be enough... */
2998 + avr32_make_reglist8 (reglist8, (char *) reglist);
2999 +
3000 + if (reglist8 & 0x80)
3001 + /* This instruction is also a return */
3002 + insert_ret = FALSE;
3003 +
3004 + if (r12_imm && !insert_ret)
3005 + fprintf (f, "\tpopm %s, r12=%li\n", reglist, INTVAL (r12_imm));
3006 + else
3007 + fprintf (f, "\tpopm %s\n", reglist);
3008 +
3009 + }
3010 + else
3011 + {
3012 + char reglist[64]; /* 64 bytes should be enough... */
3013 + avr32_make_reglist16 (saved_reg_mask, (char *) reglist);
3014 + if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
3015 + /* This instruction is also a return */
3016 + insert_ret = FALSE;
3017 +
3018 + if (r12_imm && !insert_ret)
3019 + fprintf (f, "\tldm sp++, %s, r12=%li\n", reglist,
3020 + INTVAL (r12_imm));
3021 + else
3022 + fprintf (f, "\tldm sp++, %s\n", reglist);
3023 +
3024 + }
3025 +
3026 + }
3027 +
3028 + if (IS_INTERRUPT (func_type))
3029 + {
3030 + fprintf (f, "\trete\n");
3031 + }
3032 + else if (insert_ret)
3033 + {
3034 + if (r12_imm)
3035 + fprintf (f, "\tretal %li\n", INTVAL (r12_imm));
3036 + else
3037 + fprintf (f, "\tretal r12\n");
3038 + }
3039 +}
3040 +
3041 +/* Function for converting a fp-register mask to a
3042 + reglistCPD8 register list string. */
3043 +void
3044 +avr32_make_fp_reglist_d (int reglist_mask, char *reglist_string)
3045 +{
3046 + int i;
3047 +
3048 + /* Make sure reglist_string is empty */
3049 + reglist_string[0] = '\0';
3050 +
3051 + for (i = 0; i < NUM_FP_REGS; i += 2)
3052 + {
3053 + if (reglist_mask & (1 << i))
3054 + {
3055 + strlen (reglist_string) ?
3056 + sprintf (reglist_string, "%s, %s-%s", reglist_string,
3057 + reg_names[INTERNAL_FP_REGNUM (i)],
3058 + reg_names[INTERNAL_FP_REGNUM (i + 1)]) :
3059 + sprintf (reglist_string, "%s-%s",
3060 + reg_names[INTERNAL_FP_REGNUM (i)],
3061 + reg_names[INTERNAL_FP_REGNUM (i + 1)]);
3062 + }
3063 + }
3064 +}
3065 +
3066 +/* Function for converting a fp-register mask to a
3067 + reglistCP8 register list string. */
3068 +void
3069 +avr32_make_fp_reglist_w (int reglist_mask, char *reglist_string)
3070 +{
3071 + int i;
3072 +
3073 + /* Make sure reglist_string is empty */
3074 + reglist_string[0] = '\0';
3075 +
3076 + for (i = 0; i < NUM_FP_REGS; ++i)
3077 + {
3078 + if (reglist_mask & (1 << i))
3079 + {
3080 + strlen (reglist_string) ?
3081 + sprintf (reglist_string, "%s, %s", reglist_string,
3082 + reg_names[INTERNAL_FP_REGNUM (i)]) :
3083 + sprintf (reglist_string, "%s", reg_names[INTERNAL_FP_REGNUM (i)]);
3084 + }
3085 + }
3086 +}
3087 +
3088 +void
3089 +avr32_make_reglist16 (int reglist16_vect, char *reglist16_string)
3090 +{
3091 + int i;
3092 +
3093 + /* Make sure reglist16_string is empty */
3094 + reglist16_string[0] = '\0';
3095 +
3096 + for (i = 0; i < 16; ++i)
3097 + {
3098 + if (reglist16_vect & (1 << i))
3099 + {
3100 + strlen (reglist16_string) ?
3101 + sprintf (reglist16_string, "%s, %s", reglist16_string,
3102 + reg_names[INTERNAL_REGNUM (i)]) :
3103 + sprintf (reglist16_string, "%s", reg_names[INTERNAL_REGNUM (i)]);
3104 + }
3105 + }
3106 +}
3107 +
3108 +int
3109 +avr32_convert_to_reglist16 (int reglist8_vect)
3110 +{
3111 + int reglist16_vect = 0;
3112 + if (reglist8_vect & 0x1)
3113 + reglist16_vect |= 0xF;
3114 + if (reglist8_vect & 0x2)
3115 + reglist16_vect |= 0xF0;
3116 + if (reglist8_vect & 0x4)
3117 + reglist16_vect |= 0x300;
3118 + if (reglist8_vect & 0x8)
3119 + reglist16_vect |= 0x400;
3120 + if (reglist8_vect & 0x10)
3121 + reglist16_vect |= 0x800;
3122 + if (reglist8_vect & 0x20)
3123 + reglist16_vect |= 0x1000;
3124 + if (reglist8_vect & 0x40)
3125 + reglist16_vect |= 0x4000;
3126 + if (reglist8_vect & 0x80)
3127 + reglist16_vect |= 0x8000;
3128 +
3129 + return reglist16_vect;
3130 +}
3131 +
3132 +void
3133 +avr32_make_reglist8 (int reglist8_vect, char *reglist8_string)
3134 +{
3135 + /* Make sure reglist8_string is empty */
3136 + reglist8_string[0] = '\0';
3137 +
3138 + if (reglist8_vect & 0x1)
3139 + sprintf (reglist8_string, "r0-r3");
3140 + if (reglist8_vect & 0x2)
3141 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r4-r7",
3142 + reglist8_string) :
3143 + sprintf (reglist8_string, "r4-r7");
3144 + if (reglist8_vect & 0x4)
3145 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r8-r9",
3146 + reglist8_string) :
3147 + sprintf (reglist8_string, "r8-r9");
3148 + if (reglist8_vect & 0x8)
3149 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r10",
3150 + reglist8_string) :
3151 + sprintf (reglist8_string, "r10");
3152 + if (reglist8_vect & 0x10)
3153 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r11",
3154 + reglist8_string) :
3155 + sprintf (reglist8_string, "r11");
3156 + if (reglist8_vect & 0x20)
3157 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r12",
3158 + reglist8_string) :
3159 + sprintf (reglist8_string, "r12");
3160 + if (reglist8_vect & 0x40)
3161 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, lr",
3162 + reglist8_string) :
3163 + sprintf (reglist8_string, "lr");
3164 + if (reglist8_vect & 0x80)
3165 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, pc",
3166 + reglist8_string) :
3167 + sprintf (reglist8_string, "pc");
3168 +}
3169 +
3170 +int
3171 +avr32_eh_return_data_regno (int n)
3172 +{
3173 + if (n >= 0 && n <= 3)
3174 + return 8 + n;
3175 + else
3176 + return INVALID_REGNUM;
3177 +}
3178 +
3179 +/* Compute the distance from register FROM to register TO.
3180 + These can be the arg pointer, the frame pointer or
3181 + the stack pointer.
3182 + Typical stack layout looks like this:
3183 +
3184 + old stack pointer -> | |
3185 + ----
3186 + | | \
3187 + | | saved arguments for
3188 + | | vararg functions
3189 + arg_pointer -> | | /
3190 + --
3191 + | | \
3192 + | | call saved
3193 + | | registers
3194 + | | /
3195 + frame ptr -> --
3196 + | | \
3197 + | | local
3198 + | | variables
3199 + stack ptr --> | | /
3200 + --
3201 + | | \
3202 + | | outgoing
3203 + | | arguments
3204 + | | /
3205 + --
3206 +
3207 +  For a given function some or all of these stack components
3208 + may not be needed, giving rise to the possibility of
3209 + eliminating some of the registers.
3210 +
3211 + The values returned by this function must reflect the behaviour
3212 + of avr32_expand_prologue() and avr32_compute_save_reg_mask().
3213 +
3214 + The sign of the number returned reflects the direction of stack
3215 + growth, so the values are positive for all eliminations except
3216 + from the soft frame pointer to the hard frame pointer. */
3217 +
3218 +
3219 +int
3220 +avr32_initial_elimination_offset (int from, int to)
3221 +{
3222 + int i;
3223 + int call_saved_regs = 0;
3224 + unsigned long saved_reg_mask, saved_fp_reg_mask;
3225 + unsigned int local_vars = get_frame_size ();
3226 +
3227 + saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
3228 + saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
3229 +
3230 + for (i = 0; i < 16; ++i)
3231 + {
3232 + if (saved_reg_mask & (1 << i))
3233 + call_saved_regs += 4;
3234 + }
3235 +
3236 + for (i = 0; i < NUM_FP_REGS; ++i)
3237 + {
3238 + if (saved_fp_reg_mask & (1 << i))
3239 + call_saved_regs += 4;
3240 + }
3241 +
3242 + switch (from)
3243 + {
3244 + case ARG_POINTER_REGNUM:
3245 + switch (to)
3246 + {
3247 + case STACK_POINTER_REGNUM:
3248 + return call_saved_regs + local_vars;
3249 + case FRAME_POINTER_REGNUM:
3250 + return call_saved_regs;
3251 + default:
3252 + abort ();
3253 + }
3254 + case FRAME_POINTER_REGNUM:
3255 + switch (to)
3256 + {
3257 + case STACK_POINTER_REGNUM:
3258 + return local_vars;
3259 + default:
3260 + abort ();
3261 + }
3262 + default:
3263 + abort ();
3264 + }
3265 +}
3266 +
3267 +
3268 +/*
3269 +  Returns an rtx used when passing the next argument to a function.
3270 +  avr32_init_cumulative_args() and avr32_function_arg_advance() set which
3271 +  register to use.
3272 +*/
3273 +rtx
3274 +avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode,
3275 + tree type, int named)
3276 +{
3277 + int index = -1;
3278 +
3279 + HOST_WIDE_INT arg_size, arg_rsize;
3280 + if (type)
3281 + {
3282 + arg_size = int_size_in_bytes (type);
3283 + }
3284 + else
3285 + {
3286 + arg_size = GET_MODE_SIZE (mode);
3287 + }
3288 + arg_rsize = PUSH_ROUNDING (arg_size);
3289 +
3290 + /*
3291 + The last time this macro is called, it is called with mode == VOIDmode,
3292 + and its result is passed to the call or call_value pattern as operands 2
3293 + and 3 respectively. */
3294 + if (mode == VOIDmode)
3295 + {
3296 + return gen_rtx_CONST_INT (SImode, 22); /* ToDo: fixme. */
3297 + }
3298 +
3299 + if ((*targetm.calls.must_pass_in_stack) (mode, type) || !named)
3300 + {
3301 + return NULL_RTX;
3302 + }
3303 +
3304 + if (arg_rsize == 8)
3305 + {
3306 + /* use r11:r10 or r9:r8. */
3307 + if (!(GET_USED_INDEX (cum, 1) || GET_USED_INDEX (cum, 2)))
3308 + index = 1;
3309 + else if (!(GET_USED_INDEX (cum, 3) || GET_USED_INDEX (cum, 4)))
3310 + index = 3;
3311 + else
3312 + index = -1;
3313 + }
3314 + else if (arg_rsize == 4)
3315 + { /* Use first available register */
3316 + index = 0;
3317 + while (index <= LAST_CUM_REG_INDEX && GET_USED_INDEX (cum, index))
3318 + index++;
3319 + if (index > LAST_CUM_REG_INDEX)
3320 + index = -1;
3321 + }
3322 +
3323 + SET_REG_INDEX (cum, index);
3324 +
3325 + if (GET_REG_INDEX (cum) >= 0)
3326 + return gen_rtx_REG (mode,
3327 + avr32_function_arg_reglist[GET_REG_INDEX (cum)]);
3328 +
3329 + return NULL_RTX;
3330 +}
3331 +
3332 +/*
3333 + Set the register used for passing the first argument to a function.
3334 +*/
3335 +void
3336 +avr32_init_cumulative_args (CUMULATIVE_ARGS * cum, tree fntype,
3337 + rtx libname ATTRIBUTE_UNUSED,
3338 + tree fndecl ATTRIBUTE_UNUSED)
3339 +{
3340 + /* Set all registers as unused. */
3341 + SET_INDEXES_UNUSED (cum);
3342 +
3343 + /* Reset uses_anonymous_args */
3344 + cum->uses_anonymous_args = 0;
3345 +
3346 + /* Reset size of stack pushed arguments */
3347 + cum->stack_pushed_args_size = 0;
3348 +
3349 +  /* If the function is returning a value passed in memory, r12 is used as a
3350 + Return Value Pointer. */
3351 +
3352 + if (fntype != 0 && avr32_return_in_memory (TREE_TYPE (fntype), fntype))
3353 + {
3354 + SET_REG_INDEX (cum, 0);
3355 + SET_USED_INDEX (cum, GET_REG_INDEX (cum));
3356 + }
3357 +}
3358 +
3359 +/*
3360 + Set register used for passing the next argument to a function. Only the
3361 + Scratch Registers are used.
3362 +
3363 + number name
3364 + 15 r15 PC
3365 + 14 r14 LR
3366 + 13 r13 _SP_________
3367 + FIRST_CUM_REG 12 r12 _||_
3368 + 10 r11 ||
3369 + 11 r10 _||_ Scratch Registers
3370 + 8 r9 ||
3371 + LAST_SCRATCH_REG 9 r8 _\/_________
3372 + 6 r7 /\
3373 + 7 r6 ||
3374 + 4 r5 ||
3375 + 5 r4 ||
3376 + 2 r3 ||
3377 + 3 r2 ||
3378 + 0 r1 ||
3379 + 1 r0 _||_________
3380 +
3381 +*/
3382 +void
3383 +avr32_function_arg_advance (CUMULATIVE_ARGS * cum, enum machine_mode mode,
3384 + tree type, int named ATTRIBUTE_UNUSED)
3385 +{
3386 + HOST_WIDE_INT arg_size, arg_rsize;
3387 +
3388 + if (type)
3389 + {
3390 + arg_size = int_size_in_bytes (type);
3391 + }
3392 + else
3393 + {
3394 + arg_size = GET_MODE_SIZE (mode);
3395 + }
3396 + arg_rsize = PUSH_ROUNDING (arg_size);
3397 +
3398 +  /* If the argument had to be passed on the stack, no register is used. */
3399 + if ((*targetm.calls.must_pass_in_stack) (mode, type))
3400 + {
3401 + cum->stack_pushed_args_size += PUSH_ROUNDING (int_size_in_bytes (type));
3402 + return;
3403 + }
3404 +
3405 + /* Mark the used registers as "used". */
3406 + if (GET_REG_INDEX (cum) >= 0)
3407 + {
3408 + SET_USED_INDEX (cum, GET_REG_INDEX (cum));
3409 + if (arg_rsize == 8)
3410 + {
3411 + SET_USED_INDEX (cum, (GET_REG_INDEX (cum) + 1));
3412 + }
3413 + }
3414 + else
3415 + {
3416 + /* Had to use stack */
3417 + cum->stack_pushed_args_size += arg_rsize;
3418 + }
3419 +}
3420 +
3421 +/*
3422 +  Defines which direction to go to find the next register to use if the
3423 +  argument is larger than one register or for arguments shorter than an
3424 + int which is not promoted, such as the last part of structures with
3425 + size not a multiple of 4. */
3426 +enum direction
3427 +avr32_function_arg_padding (enum machine_mode mode ATTRIBUTE_UNUSED,
3428 + tree type)
3429 +{
3430 + /* Pad upward for all aggregates except byte and halfword sized aggregates
3431 + which can be passed in registers. */
3432 + if (type
3433 + && AGGREGATE_TYPE_P (type)
3434 + && (int_size_in_bytes (type) != 1)
3435 + && !((int_size_in_bytes (type) == 2)
3436 + && TYPE_ALIGN_UNIT (type) >= 2)
3437 + && (int_size_in_bytes (type) & 0x3))
3438 + {
3439 + return upward;
3440 + }
3441 +
3442 + return downward;
3443 +}
3444 +
3445 +/*
3446 + Return a rtx used for the return value from a function call.
3447 +*/
3448 +rtx
3449 +avr32_function_value (tree type, tree func)
3450 +{
3451 + if (avr32_return_in_memory (type, func))
3452 + return NULL_RTX;
3453 +
3454 + if (int_size_in_bytes (type) <= 4)
3455 + if (avr32_return_in_msb (type))
3456 +      /* Aggregates of size less than a word which align the data in the
3457 + MSB must use SImode for r12. */
3458 + return gen_rtx_REG (SImode, RET_REGISTER);
3459 + else
3460 + return gen_rtx_REG (TYPE_MODE (type), RET_REGISTER);
3461 + else if (int_size_in_bytes (type) <= 8)
3462 + return gen_rtx_REG (TYPE_MODE (type), INTERNAL_REGNUM (11));
3463 +
3464 + return NULL_RTX;
3465 +}
3466 +
3467 +/*
3468 + Return a rtx used for the return value from a library function call.
3469 +*/
3470 +rtx
3471 +avr32_libcall_value (enum machine_mode mode)
3472 +{
3473 +
3474 + if (GET_MODE_SIZE (mode) <= 4)
3475 + return gen_rtx_REG (mode, RET_REGISTER);
3476 + else if (GET_MODE_SIZE (mode) <= 8)
3477 + return gen_rtx_REG (mode, INTERNAL_REGNUM (11));
3478 + else
3479 + return NULL_RTX;
3480 +}
3481 +
3482 +/* Return TRUE if X references a SYMBOL_REF. */
3483 +int
3484 +symbol_mentioned_p (rtx x)
3485 +{
3486 + const char *fmt;
3487 + int i;
3488 +
3489 + if (GET_CODE (x) == SYMBOL_REF)
3490 + return 1;
3491 +
3492 + fmt = GET_RTX_FORMAT (GET_CODE (x));
3493 +
3494 + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3495 + {
3496 + if (fmt[i] == 'E')
3497 + {
3498 + int j;
3499 +
3500 + for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3501 + if (symbol_mentioned_p (XVECEXP (x, i, j)))
3502 + return 1;
3503 + }
3504 + else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
3505 + return 1;
3506 + }
3507 +
3508 + return 0;
3509 +}
3510 +
3511 +/* Return TRUE if X references a LABEL_REF. */
3512 +int
3513 +label_mentioned_p (rtx x)
3514 +{
3515 + const char *fmt;
3516 + int i;
3517 +
3518 + if (GET_CODE (x) == LABEL_REF)
3519 + return 1;
3520 +
3521 + fmt = GET_RTX_FORMAT (GET_CODE (x));
3522 + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3523 + {
3524 + if (fmt[i] == 'E')
3525 + {
3526 + int j;
3527 +
3528 + for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3529 + if (label_mentioned_p (XVECEXP (x, i, j)))
3530 + return 1;
3531 + }
3532 + else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
3533 + return 1;
3534 + }
3535 +
3536 + return 0;
3537 +}
3538 +
3539 +
3540 +int
3541 +avr32_legitimate_pic_operand_p (rtx x)
3542 +{
3543 +
3544 + /* We can't have const, this must be broken down to a symbol. */
3545 + if (GET_CODE (x) == CONST)
3546 + return FALSE;
3547 +
3548 + /* Can't access symbols or labels via the constant pool either */
3549 + if ((GET_CODE (x) == SYMBOL_REF
3550 + && CONSTANT_POOL_ADDRESS_P (x)
3551 + && (symbol_mentioned_p (get_pool_constant (x))
3552 + || label_mentioned_p (get_pool_constant (x)))))
3553 + return FALSE;
3554 +
3555 + return TRUE;
3556 +}
3557 +
3558 +
3559 +rtx
3560 +legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3561 + rtx reg)
3562 +{
3563 +
3564 + if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
3565 + {
3566 + int subregs = 0;
3567 +
3568 + if (reg == 0)
3569 + {
3570 + if (no_new_pseudos)
3571 + abort ();
3572 + else
3573 + reg = gen_reg_rtx (Pmode);
3574 +
3575 + subregs = 1;
3576 + }
3577 +
3578 + emit_move_insn (reg, orig);
3579 +
3580 + /* Only set current function as using pic offset table if flag_pic is
3581 + set. This is because this function is also used if
3582 + TARGET_HAS_ASM_ADDR_PSEUDOS is set. */
3583 + if (flag_pic)
3584 + current_function_uses_pic_offset_table = 1;
3585 +
3586 + /* Put a REG_EQUAL note on this insn, so that it can be optimized by
3587 + loop. */
3588 + return reg;
3589 + }
3590 + else if (GET_CODE (orig) == CONST)
3591 + {
3592 + rtx base, offset;
3593 +
3594 + if (flag_pic
3595 + && GET_CODE (XEXP (orig, 0)) == PLUS
3596 + && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3597 + return orig;
3598 +
3599 + if (reg == 0)
3600 + {
3601 + if (no_new_pseudos)
3602 + abort ();
3603 + else
3604 + reg = gen_reg_rtx (Pmode);
3605 + }
3606 +
3607 + if (GET_CODE (XEXP (orig, 0)) == PLUS)
3608 + {
3609 + base =
3610 + legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3611 + offset =
3612 + legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3613 + base == reg ? 0 : reg);
3614 + }
3615 + else
3616 + abort ();
3617 +
3618 + if (GET_CODE (offset) == CONST_INT)
3619 + {
3620 + /* The base register doesn't really matter, we only want to test
3621 + the index for the appropriate mode. */
3622 + if (!avr32_const_ok_for_constraint_p (INTVAL (offset), 'I', "Is21"))
3623 + {
3624 + if (!no_new_pseudos)
3625 + offset = force_reg (Pmode, offset);
3626 + else
3627 + abort ();
3628 + }
3629 +
3630 + if (GET_CODE (offset) == CONST_INT)
3631 + return plus_constant (base, INTVAL (offset));
3632 + }
3633 +
3634 + return gen_rtx_PLUS (Pmode, base, offset);
3635 + }
3636 +
3637 + return orig;
3638 +}
3639 +
3640 +/* Generate code to load the PIC register. */
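+/* The emitted sequence loads the offset between a local label and
+   _GLOBAL_OFFSET_TABLE_ from the constant pool and combines it with the PC
+   at that label, leaving the GOT address in the PIC register.  */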
3641 +void
3642 +avr32_load_pic_register (void)
3643 +{
3644 + rtx l1, pic_tmp;
3645 + rtx global_offset_table;
3646 +
3647 + if ((current_function_uses_pic_offset_table == 0) || TARGET_NO_INIT_GOT)
3648 + return;
3649 +
3650 + if (!flag_pic)
3651 + abort ();
3652 +
3653 + l1 = gen_label_rtx ();
3654 +
3655 + global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3656 + pic_tmp =
3657 + gen_rtx_CONST (Pmode,
3658 + gen_rtx_MINUS (SImode, gen_rtx_LABEL_REF (Pmode, l1),
3659 + global_offset_table));
3660 + emit_insn (gen_pic_load_addr
3661 + (pic_offset_table_rtx, force_const_mem (SImode, pic_tmp)));
3662 + emit_insn (gen_pic_compute_got_from_pc (pic_offset_table_rtx, l1));
3663 +
3664 + /* Need to emit this whether or not we obey regdecls, since setjmp/longjmp
3665 + can cause life info to screw up. */
3666 + emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3667 +}
3668 +
3669 +
3670 +
3671 +/* This hook should return true if values of type type are returned at the most
3672 + significant end of a register (in other words, if they are padded at the
3673 + least significant end). You can assume that type is returned in a register;
3674 + the caller is required to check this. Note that the register provided by
3675 + FUNCTION_VALUE must be able to hold the complete return value. For example,
3676 + if a 1-, 2- or 3-byte structure is returned at the most significant end of a
3677 + 4-byte register, FUNCTION_VALUE should provide an SImode rtx. */
3678 +bool
3679 +avr32_return_in_msb (tree type ATTRIBUTE_UNUSED)
3680 +{
3681 + /* if ( AGGREGATE_TYPE_P (type) ) if ((int_size_in_bytes(type) == 1) ||
3682 + ((int_size_in_bytes(type) == 2) && TYPE_ALIGN_UNIT(type) >= 2)) return
3683 + false; else return true; */
3684 +
3685 + return false;
3686 +}
3687 +
3688 +
3689 +/*
3690 + Returns one if a certain function value is going to be returned in memory
3691 + and zero if it is going to be returned in a register.
3692 +
3693 +  BLKmode and all other modes that are larger than 64 bits are returned in
3694 +  memory.
3695 +*/
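+/* For example, a 12-byte structure (larger than 2 * UNITS_PER_WORD) or a
+   variable-sized type (int_size_in_bytes returns -1) is returned in memory.  */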
3696 +bool
3697 +avr32_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
3698 +{
3699 + if (TYPE_MODE (type) == VOIDmode)
3700 + return false;
3701 +
3702 + if (int_size_in_bytes (type) > (2 * UNITS_PER_WORD)
3703 + || int_size_in_bytes (type) == -1)
3704 + {
3705 + return true;
3706 + }
3707 +
3708 + /* If we have an aggregate then use the same mechanism as when checking if
3709 + it should be passed on the stack. */
3710 + if (type
3711 + && AGGREGATE_TYPE_P (type)
3712 + && (*targetm.calls.must_pass_in_stack) (TYPE_MODE (type), type))
3713 + return true;
3714 +
3715 + return false;
3716 +}
3717 +
3718 +
3719 +/* Output the constant part of the trampoline.
3720 + lddpc r0, pc[0x8:e] ; load static chain register
3721 +   lddpc pc, pc[0x8:e]	; jump to subroutine
3722 + .long 0 ; Address to static chain,
3723 + ; filled in by avr32_initialize_trampoline()
3724 +   .long 0		; Address to subroutine,
3725 + ; filled in by avr32_initialize_trampoline()
3726 +*/
3727 +void
3728 +avr32_trampoline_template (FILE * file)
3729 +{
3730 + fprintf (file, "\tlddpc r0, pc[8]\n");
3731 + fprintf (file, "\tlddpc pc, pc[8]\n");
3732 + /* make room for the address of the static chain. */
3733 + fprintf (file, "\t.long\t0\n");
3734 +  /* make room for the address of the subroutine. */
3735 + fprintf (file, "\t.long\t0\n");
3736 +}
3737 +
3738 +
3739 +/*
3740 + Initialize the variable parts of a trampoline.
3741 +*/
3742 +void
3743 +avr32_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3744 +{
3745 + /* Store the address to the static chain. */
3746 + emit_move_insn (gen_rtx_MEM
3747 + (SImode, plus_constant (addr, TRAMPOLINE_SIZE - 4)),
3748 + static_chain);
3749 +
3750 + /* Store the address to the function. */
3751 + emit_move_insn (gen_rtx_MEM (SImode, plus_constant (addr, TRAMPOLINE_SIZE)),
3752 + fnaddr);
3753 +
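+  /* Invalidate the instruction cache so the freshly written trampoline is
+     not executed from stale cache contents.  */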
3754 + emit_insn (gen_cache (gen_rtx_REG (SImode, 13),
3755 + gen_rtx_CONST_INT (SImode,
3756 + AVR32_CACHE_INVALIDATE_ICACHE)));
3757 +}
3758 +
3759 +/* Return nonzero if X is valid as an addressing register. */
3760 +int
3761 +avr32_address_register_rtx_p (rtx x, int strict_p)
3762 +{
3763 + int regno;
3764 +
3765 + if (GET_CODE (x) != REG)
3766 + return 0;
3767 +
3768 + regno = REGNO (x);
3769 +
3770 + if (strict_p)
3771 + return REGNO_OK_FOR_BASE_P (regno);
3772 +
3773 + return (regno <= LAST_REGNUM || regno >= FIRST_PSEUDO_REGISTER);
3774 +}
3775 +
3776 +/* Return nonzero if INDEX is valid for an address index operand. */
3777 +int
3778 +avr32_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
3779 +{
3780 + enum rtx_code code = GET_CODE (index);
3781 +
3782 + if (mode == TImode)
3783 + return 0;
3784 +
3785 + /* Standard coprocessor addressing modes. */
3786 + if (code == CONST_INT)
3787 + {
3788 + if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
3789 +	/* Coprocessor mem insns have a smaller reach than ordinary mem insns. */
3790 + return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ku14");
3791 + else
3792 + return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ks16");
3793 + }
3794 +
3795 + if (avr32_address_register_rtx_p (index, strict_p))
3796 + return 1;
3797 +
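+  /* A scaled index is accepted either as a multiplication by a power of two
+     no larger than 8 or as a left shift by 1 to 3 bit positions.  */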
3798 + if (code == MULT)
3799 + {
3800 + rtx xiop0 = XEXP (index, 0);
3801 + rtx xiop1 = XEXP (index, 1);
3802 + return ((avr32_address_register_rtx_p (xiop0, strict_p)
3803 + && power_of_two_operand (xiop1, SImode)
3804 + && (INTVAL (xiop1) <= 8))
3805 + || (avr32_address_register_rtx_p (xiop1, strict_p)
3806 + && power_of_two_operand (xiop0, SImode)
3807 + && (INTVAL (xiop0) <= 8)));
3808 + }
3809 + else if (code == ASHIFT)
3810 + {
3811 + rtx op = XEXP (index, 1);
3812 +
3813 + return (avr32_address_register_rtx_p (XEXP (index, 0), strict_p)
3814 + && GET_CODE (op) == CONST_INT
3815 + && INTVAL (op) > 0 && INTVAL (op) <= 3);
3816 + }
3817 +
3818 + return 0;
3819 +}
3820 +
3821 +/*
3822 + Used in the GO_IF_LEGITIMATE_ADDRESS macro. Returns a nonzero value if
3823 + the RTX x is a legitimate memory address.
3824 +
3825 + Returns FALSE if the address is not legitimate, TRUE
3826 + if it is.
3827 +*/
3828 +
3829 +/* Forward declaration*/
3830 +int is_minipool_label (rtx label);
3831 +
3832 +int
3833 +avr32_legitimate_address (enum machine_mode mode ATTRIBUTE_UNUSED,
3834 + rtx x, int strict)
3835 +{
3836 +
3837 + switch (GET_CODE (x))
3838 + {
3839 + case REG:
3840 + return avr32_address_register_rtx_p (x, strict);
3841 + case CONST:
3842 + {
3843 + rtx label = avr32_find_symbol (x);
3844 + if (label
3845 + &&
3846 + ( (CONSTANT_POOL_ADDRESS_P (label)
3847 + && !(flag_pic
3848 + && (symbol_mentioned_p (get_pool_constant (label))
3849 + || label_mentioned_p (get_pool_constant(label)))))
3850 + /* TODO! Can this ever happen??? */
3851 + || ((GET_CODE (label) == LABEL_REF)
3852 + && GET_CODE (XEXP (label, 0)) == CODE_LABEL
3853 + && is_minipool_label (XEXP (label, 0)))))
3854 + {
3855 + return TRUE;
3856 + }
3857 + }
3858 + break;
3859 + case LABEL_REF:
3860 + if (GET_CODE (XEXP (x, 0)) == CODE_LABEL
3861 + && is_minipool_label (XEXP (x, 0)))
3862 + {
3863 + return TRUE;
3864 + }
3865 + break;
3866 + case SYMBOL_REF:
3867 + {
3868 + if (CONSTANT_POOL_ADDRESS_P (x)
3869 + && !(flag_pic
3870 + && (symbol_mentioned_p (get_pool_constant (x))
3871 + || label_mentioned_p (get_pool_constant (x)))))
3872 + return TRUE;
3873 + /*
3874 +	   A symbol_ref is only legal if it is a function. If all of them were
3875 +	   legal, a pseudo reg that is a constant would be replaced by a
3876 +	   symbol_ref and produce illegal code. SYMBOL_REF_FLAG is set by
3877 +	   ENCODE_SECTION_INFO. */
3878 + else if (SYMBOL_REF_RCALL_FUNCTION_P (x))
3879 + return TRUE;
3880 + break;
3881 + }
3882 + case PRE_DEC: /* (pre_dec (...)) */
3883 + case POST_INC: /* (post_inc (...)) */
3884 + return avr32_address_register_rtx_p (XEXP (x, 0), strict);
3885 + case PLUS: /* (plus (...) (...)) */
3886 + {
3887 + rtx xop0 = XEXP (x, 0);
3888 + rtx xop1 = XEXP (x, 1);
3889 +
3890 + return ((avr32_address_register_rtx_p (xop0, strict)
3891 + && avr32_legitimate_index_p (mode, xop1, strict))
3892 + || (avr32_address_register_rtx_p (xop1, strict)
3893 + && avr32_legitimate_index_p (mode, xop0, strict)));
3894 + }
3895 + default:
3896 + break;
3897 + }
3898 +
3899 + return FALSE;
3900 +}
3901 +
3902 +
3903 +int
3904 +avr32_const_double_immediate (rtx value)
3905 +{
3906 + HOST_WIDE_INT hi, lo;
3907 +
3908 + if (GET_CODE (value) != CONST_DOUBLE)
3909 + return FALSE;
3910 +
3911 + if (GET_MODE (value) == DImode)
3912 + {
3913 + hi = CONST_DOUBLE_HIGH (value);
3914 + lo = CONST_DOUBLE_LOW (value);
3915 + }
3916 + else
3917 + {
3918 + HOST_WIDE_INT target_float[2];
3919 + hi = lo = 0;
3920 + real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (value),
3921 + GET_MODE (value));
3922 + lo = target_float[0];
3923 + hi = target_float[1];
3924 + }
3925 + if (avr32_const_ok_for_constraint_p (lo, 'K', "Ks21")
3926 + && ((GET_MODE (value) == SFmode)
3927 + || avr32_const_ok_for_constraint_p (hi, 'K', "Ks21")))
3928 + {
3929 + return TRUE;
3930 + }
3931 +
3932 + return FALSE;
3933 +}
3934 +
3935 +
3936 +int
3937 +avr32_legitimate_constant_p (rtx x)
3938 +{
3939 + switch (GET_CODE (x))
3940 + {
3941 + case CONST_INT:
3942 + return avr32_const_ok_for_constraint_p (INTVAL (x), 'K', "Ks21");
3943 + case CONST_DOUBLE:
3944 + if (GET_MODE (x) == SFmode
3945 + || GET_MODE (x) == DFmode || GET_MODE (x) == DImode)
3946 + return avr32_const_double_immediate (x);
3947 + else
3948 + return 0;
3949 + case LABEL_REF:
3950 + return flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS;
3951 + case SYMBOL_REF:
3952 + return flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS;
3953 + case CONST:
3954 + /* We must handle this one in the movsi expansion in order for gcc not
3955 + to put it in the constant pool. */
3956 + return 0 /* flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS */ ;
3957 + case HIGH:
3958 + case CONST_VECTOR:
3959 + return 0;
3960 + default:
3961 + printf ("%s():\n", __FUNCTION__);
3962 + debug_rtx (x);
3963 + return 1;
3964 + }
3965 +}
3966 +
3967 +
3968 +/* Strip any special encoding from labels */
3969 +const char *
3970 +avr32_strip_name_encoding (const char *name)
3971 +{
3972 + const char *stripped = name;
3973 +
3974 + while (1)
3975 + {
3976 + switch (stripped[0])
3977 + {
3978 + case '#':
3979 + stripped = strchr (name + 1, '#') + 1;
3980 + break;
3981 + case '*':
3982 + stripped = &stripped[1];
3983 + break;
3984 + default:
3985 + return stripped;
3986 + }
3987 + }
3988 +}
3989 +
3990 +
3991 +
3992 +/* Do anything needed before RTL is emitted for each function. */
3993 +static struct machine_function *
3994 +avr32_init_machine_status (void)
3995 +{
3996 + struct machine_function *machine;
3997 + machine =
3998 + (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
3999 +
4000 +#if AVR32_FT_UNKNOWN != 0
4001 + machine->func_type = AVR32_FT_UNKNOWN;
4002 +#endif
4003 +
4004 + machine->minipool_label_head = 0;
4005 + machine->minipool_label_tail = 0;
4006 + return machine;
4007 +}
4008 +
4009 +void
4010 +avr32_init_expanders (void)
4011 +{
4012 + /* Arrange to initialize and mark the machine per-function status. */
4013 + init_machine_status = avr32_init_machine_status;
4014 +}
4015 +
4016 +
4017 +/* Return an RTX indicating where the return address to the
4018 + calling function can be found. */
4019 +
4020 +rtx
4021 +avr32_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4022 +{
4023 + if (count != 0)
4024 + return NULL_RTX;
4025 +
4026 + return get_hard_reg_initial_val (Pmode, LR_REGNUM);
4027 +}
4028 +
4029 +
4030 +void
4031 +avr32_encode_section_info (tree decl, rtx rtl, int first)
4032 +{
4033 +
4034 + if (first && DECL_P (decl))
4035 + {
4036 +      /* Set SYMBOL_REF_FLAG for local functions. */
4037 + if (!TREE_PUBLIC (decl) && TREE_CODE (decl) == FUNCTION_DECL)
4038 + {
4039 + if ((*targetm.binds_local_p) (decl))
4040 + {
4041 + SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
4042 + }
4043 + }
4044 + }
4045 +}
4046 +
4047 +
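+/* Emit LEN bytes starting at PTR as a .ascii directive, escaping newlines
+   (as the octal sequence \012), quotes, backslashes and embedded NUL bytes
+   so that the assembler reproduces the original string.  */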
4048 +void
4049 +avr32_asm_output_ascii (FILE * stream, char *ptr, int len)
4050 +{
4051 + int i, i_new = 0;
4052 +  char *new_ptr = xmalloc (4 * len + 1);	/* +1 for the terminating NUL below.  */
4053 + if (new_ptr == NULL)
4054 + internal_error ("Out of memory.");
4055 +
4056 + for (i = 0; i < len; i++)
4057 + {
4058 + if (ptr[i] == '\n')
4059 + {
4060 + new_ptr[i_new++] = '\\';
4061 + new_ptr[i_new++] = '0';
4062 + new_ptr[i_new++] = '1';
4063 + new_ptr[i_new++] = '2';
4064 + }
4065 + else if (ptr[i] == '\"')
4066 + {
4067 + new_ptr[i_new++] = '\\';
4068 + new_ptr[i_new++] = '\"';
4069 + }
4070 + else if (ptr[i] == '\\')
4071 + {
4072 + new_ptr[i_new++] = '\\';
4073 + new_ptr[i_new++] = '\\';
4074 + }
4075 + else if (ptr[i] == '\0' && i + 1 < len)
4076 + {
4077 + new_ptr[i_new++] = '\\';
4078 + new_ptr[i_new++] = '0';
4079 + }
4080 + else
4081 + {
4082 + new_ptr[i_new++] = ptr[i];
4083 + }
4084 + }
4085 +
4086 + /* Terminate new_ptr. */
4087 + new_ptr[i_new] = '\0';
4088 + fprintf (stream, "\t.ascii\t\"%s\"\n", new_ptr);
4089 + free (new_ptr);
4090 +}
4091 +
4092 +
4093 +void
4094 +avr32_asm_output_label (FILE * stream, const char *name)
4095 +{
4096 + name = avr32_strip_name_encoding (name);
4097 +
4098 + /* Print the label. */
4099 + assemble_name (stream, name);
4100 + fprintf (stream, ":\n");
4101 +}
4102 +
4103 +
4104 +
4105 +void
4106 +avr32_asm_weaken_label (FILE * stream, const char *name)
4107 +{
4108 + fprintf (stream, "\t.weak ");
4109 + assemble_name (stream, name);
4110 + fprintf (stream, "\n");
4111 +}
4112 +
4113 +/*
4114 +  Output a label reference. Any special encoding is stripped; names marked
4115 +  verbatim are output as-is, all other names get the user label prefix.
4116 +*/
4117 +void
4118 +avr32_asm_output_labelref (FILE * stream, const char *name)
4119 +{
4120 + int verbatim = FALSE;
4121 + const char *stripped = name;
4122 + int strip_finished = FALSE;
4123 +
4124 + while (!strip_finished)
4125 + {
4126 + switch (stripped[0])
4127 + {
4128 + case '#':
4129 + stripped = strchr (name + 1, '#') + 1;
4130 + break;
4131 + case '*':
4132 + stripped = &stripped[1];
4133 + verbatim = TRUE;
4134 + break;
4135 + default:
4136 + strip_finished = TRUE;
4137 + break;
4138 + }
4139 + }
4140 +
4141 + if (verbatim)
4142 + fputs (stripped, stream);
4143 + else
4144 + asm_fprintf (stream, "%U%s", stripped);
4145 +}
4146 +
4147 +
4148 +
4149 +/*
4150 + Check if the comparison in compare_exp is redundant
4151 + for the condition given in next_cond given that the
4152 + needed flags are already set by an earlier instruction.
4153 + Uses cc_prev_status to check this.
4154 +
4155 + Returns NULL_RTX if the compare is not redundant
4156 + or the new condition to use in the conditional
4157 + instruction if the compare is redundant.
4158 +*/
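+/*
+   For example, when cc_prev_status already holds a compare of a register
+   against zero, a following compare of that same register with zero that
+   only feeds an eq/ne test adds no information, so the eq/ne condition can
+   be used directly and the compare dropped.
+*/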
4159 +static rtx
4160 +is_compare_redundant (rtx compare_exp, rtx next_cond)
4161 +{
4162 + int z_flag_valid = FALSE;
4163 + int n_flag_valid = FALSE;
4164 + rtx new_cond;
4165 +
4166 + if (GET_CODE (compare_exp) != COMPARE)
4167 + return NULL_RTX;
4168 +
4169 +
4170 + if (GET_MODE (compare_exp) != SImode)
4171 + return NULL_RTX;
4172 +
4173 + if (rtx_equal_p (cc_prev_status.mdep.value, compare_exp))
4174 + {
4175 + /* cc0 already contains the correct comparison -> delete cmp insn */
4176 + return next_cond;
4177 + }
4178 +
4179 + switch (cc_prev_status.mdep.flags)
4180 + {
4181 + case CC_SET_VNCZ:
4182 + case CC_SET_NCZ:
4183 + n_flag_valid = TRUE;
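+    /* Fall through: whenever the N flag is valid the Z flag is valid too.  */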
4184 + case CC_SET_CZ:
4185 + case CC_SET_Z:
4186 + z_flag_valid = TRUE;
4187 + }
4188 +
4189 + if (cc_prev_status.mdep.value
4190 + && REG_P (XEXP (compare_exp, 0))
4191 + && REGNO (XEXP (compare_exp, 0)) == REGNO (cc_prev_status.mdep.value)
4192 + && GET_CODE (XEXP (compare_exp, 1)) == CONST_INT
4193 + && next_cond != NULL_RTX)
4194 + {
4195 + if (INTVAL (XEXP (compare_exp, 1)) == 0
4196 + && z_flag_valid
4197 + && (GET_CODE (next_cond) == EQ || GET_CODE (next_cond) == NE))
4198 +	/* We can skip the comparison since the Z flag already reflects ops[0]. */
4199 + return next_cond;
4200 + else if (n_flag_valid
4201 + && ((INTVAL (XEXP (compare_exp, 1)) == 0
4202 + && (GET_CODE (next_cond) == GE
4203 + || GET_CODE (next_cond) == LT))
4204 + || (INTVAL (XEXP (compare_exp, 1)) == -1
4205 + && (GET_CODE (next_cond) == GT
4206 + || GET_CODE (next_cond) == LE))))
4207 + {
4208 +	  /* We can skip the comparison since the N flag already reflects ops[0],
4209 +	     which means that we can use the mi/pl conditions to check if
4210 +	     ops[0] is GE or LT 0. */
4211 + if ((GET_CODE (next_cond) == GE) || (GET_CODE (next_cond) == GT))
4212 + new_cond =
4213 + gen_rtx_UNSPEC (CCmode, gen_rtvec (2, cc0_rtx, const0_rtx),
4214 + UNSPEC_COND_PL);
4215 + else
4216 + new_cond =
4217 + gen_rtx_UNSPEC (CCmode, gen_rtvec (2, cc0_rtx, const0_rtx),
4218 + UNSPEC_COND_MI);
4219 + return new_cond;
4220 + }
4221 + }
4222 + return NULL_RTX;
4223 +}
4224 +
4225 +/* Updates cc_status. */
4226 +void
4227 +avr32_notice_update_cc (rtx exp, rtx insn)
4228 +{
4229 + switch (get_attr_cc (insn))
4230 + {
4231 + case CC_CALL_SET:
4232 + CC_STATUS_INIT;
4233 + FPCC_STATUS_INIT;
4234 + /* Check if the function call returns a value in r12 */
4235 + if (REG_P (recog_data.operand[0])
4236 + && REGNO (recog_data.operand[0]) == RETVAL_REGNUM)
4237 + {
4238 + cc_status.flags = 0;
4239 + cc_status.mdep.value =
4240 + gen_rtx_COMPARE (SImode, recog_data.operand[0], const0_rtx);
4241 + cc_status.mdep.flags = CC_SET_VNCZ;
4242 +
4243 + }
4244 + break;
4245 + case CC_COMPARE:
4246 +    /* Check whether the compare will be optimized away; if so, nothing
4247 +       needs to be done here. */
4248 + if (is_compare_redundant (SET_SRC (exp), get_next_insn_cond (insn))
4249 + == NULL_RTX)
4250 + {
4251 +
4252 + /* Reset the nonstandard flag */
4253 + CC_STATUS_INIT;
4254 + cc_status.flags = 0;
4255 + cc_status.mdep.value = SET_SRC (exp);
4256 + cc_status.mdep.flags = CC_SET_VNCZ;
4257 + }
4258 + break;
4259 + case CC_FPCOMPARE:
4260 +    /* Check whether the floating-point compare will be optimized away; if
4261 +       so, nothing needs to be done here. */
4262 + if (!rtx_equal_p (cc_prev_status.mdep.fpvalue, SET_SRC (exp)))
4263 + {
4264 +	/* Record the new floating-point comparison so that a later identical
4265 +	   compare can be removed. */
4266 + cc_status.mdep.fpvalue = SET_SRC (exp);
4267 + cc_status.mdep.fpflags = CC_SET_CZ;
4268 + }
4269 + break;
4270 + case CC_FROM_FPCC:
4271 +    /* The flags are updated with the flags from the floating-point
4272 +       coprocessor; set the CC_NOT_SIGNED flag since they are set so that
4273 +       unsigned condition codes can be used directly. */
4274 + CC_STATUS_INIT;
4275 + cc_status.flags = CC_NOT_SIGNED;
4276 + cc_status.mdep.value = cc_status.mdep.fpvalue;
4277 + cc_status.mdep.flags = cc_status.mdep.fpflags;
4278 + break;
4279 + case CC_BLD:
4280 + /* Bit load is kind of like an inverted testsi, because the Z flag is
4281 + inverted */
4282 + CC_STATUS_INIT;
4283 + cc_status.flags = CC_INVERTED;
4284 + cc_status.mdep.value = SET_SRC (exp);
4285 + cc_status.mdep.flags = CC_SET_Z;
4286 + break;
4287 + case CC_NONE:
4288 +    /* Insn does not affect CC at all. Check if the instruction updates
4289 +       some of the registers currently reflected in cc0. */
4290 +
4291 + if ((GET_CODE (exp) == SET)
4292 + && (cc_status.value1 || cc_status.value2 || cc_status.mdep.value)
4293 + && (reg_mentioned_p (SET_DEST (exp), cc_status.value1)
4294 + || reg_mentioned_p (SET_DEST (exp), cc_status.value2)
4295 + || reg_mentioned_p (SET_DEST (exp), cc_status.mdep.value)))
4296 + {
4297 + CC_STATUS_INIT;
4298 + }
4299 +
4300 + /* If this is a parallel we must step through each of the parallel
4301 + expressions */
4302 + if (GET_CODE (exp) == PARALLEL)
4303 + {
4304 + int i;
4305 + for (i = 0; i < XVECLEN (exp, 0); ++i)
4306 + {
4307 + rtx vec_exp = XVECEXP (exp, 0, i);
4308 + if ((GET_CODE (vec_exp) == SET)
4309 + && (cc_status.value1 || cc_status.value2
4310 + || cc_status.mdep.value)
4311 + && (reg_mentioned_p (SET_DEST (vec_exp), cc_status.value1)
4312 + || reg_mentioned_p (SET_DEST (vec_exp),
4313 + cc_status.value2)
4314 + || reg_mentioned_p (SET_DEST (vec_exp),
4315 + cc_status.mdep.value)))
4316 + {
4317 + CC_STATUS_INIT;
4318 + }
4319 + }
4320 + }
4321 +
4322 +    /* Check if we have memory operations with post_inc or pre_dec on the
4323 +       register currently reflected in cc0. */
4324 + if (GET_CODE (exp) == SET
4325 + && GET_CODE (SET_SRC (exp)) == MEM
4326 + && (GET_CODE (XEXP (SET_SRC (exp), 0)) == POST_INC
4327 + || GET_CODE (XEXP (SET_SRC (exp), 0)) == PRE_DEC)
4328 + &&
4329 + (reg_mentioned_p
4330 + (XEXP (XEXP (SET_SRC (exp), 0), 0), cc_status.value1)
4331 + || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
4332 + cc_status.value2)
4333 + || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
4334 + cc_status.mdep.value)))
4335 + CC_STATUS_INIT;
4336 +
4337 + if (GET_CODE (exp) == SET
4338 + && GET_CODE (SET_DEST (exp)) == MEM
4339 + && (GET_CODE (XEXP (SET_DEST (exp), 0)) == POST_INC
4340 + || GET_CODE (XEXP (SET_DEST (exp), 0)) == PRE_DEC)
4341 + &&
4342 + (reg_mentioned_p
4343 + (XEXP (XEXP (SET_DEST (exp), 0), 0), cc_status.value1)
4344 + || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
4345 + cc_status.value2)
4346 + || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
4347 + cc_status.mdep.value)))
4348 + CC_STATUS_INIT;
4349 + break;
4350 +
4351 + case CC_SET_VNCZ:
4352 + CC_STATUS_INIT;
4353 + cc_status.mdep.value = recog_data.operand[0];
4354 + cc_status.mdep.flags = CC_SET_VNCZ;
4355 + break;
4356 +
4357 + case CC_SET_NCZ:
4358 + CC_STATUS_INIT;
4359 + cc_status.mdep.value = recog_data.operand[0];
4360 + cc_status.mdep.flags = CC_SET_NCZ;
4361 + break;
4362 +
4363 + case CC_SET_CZ:
4364 + CC_STATUS_INIT;
4365 + cc_status.mdep.value = recog_data.operand[0];
4366 + cc_status.mdep.flags = CC_SET_CZ;
4367 + break;
4368 +
4369 + case CC_SET_Z:
4370 + CC_STATUS_INIT;
4371 + cc_status.mdep.value = recog_data.operand[0];
4372 + cc_status.mdep.flags = CC_SET_Z;
4373 + break;
4374 +
4375 + case CC_CLOBBER:
4376 + CC_STATUS_INIT;
4377 + break;
4378 +
4379 + default:
4380 + CC_STATUS_INIT;
4381 + }
4382 +}
4383 +
4384 +
4385 +/*
4386 + Outputs to stdio stream stream the assembler syntax for an instruction
4387 + operand x. x is an RTL expression.
4388 +*/
4389 +void
4390 +avr32_print_operand (FILE * stream, rtx x, int code)
4391 +{
4392 + int error = 0;
4393 +
4394 + switch (GET_CODE (x))
4395 + {
4396 + case UNSPEC:
4397 + switch (XINT (x, 1))
4398 + {
4399 + case UNSPEC_COND_PL:
4400 + if (code == 'i')
4401 + fputs ("mi", stream);
4402 + else
4403 + fputs ("pl", stream);
4404 + break;
4405 + case UNSPEC_COND_MI:
4406 + if (code == 'i')
4407 + fputs ("pl", stream);
4408 + else
4409 + fputs ("mi", stream);
4410 + break;
4411 + default:
4412 + error = 1;
4413 + }
4414 + break;
4415 + case EQ:
4416 + if (code == 'i')
4417 + fputs ("ne", stream);
4418 + else
4419 + fputs ("eq", stream);
4420 + break;
4421 + case NE:
4422 + if (code == 'i')
4423 + fputs ("eq", stream);
4424 + else
4425 + fputs ("ne", stream);
4426 + break;
4427 + case GT:
4428 + if (code == 'i')
4429 + fputs ("le", stream);
4430 + else
4431 + fputs ("gt", stream);
4432 + break;
4433 + case GTU:
4434 + if (code == 'i')
4435 + fputs ("ls", stream);
4436 + else
4437 + fputs ("hi", stream);
4438 + break;
4439 + case LT:
4440 + if (code == 'i')
4441 + fputs ("ge", stream);
4442 + else
4443 + fputs ("lt", stream);
4444 + break;
4445 + case LTU:
4446 + if (code == 'i')
4447 + fputs ("hs", stream);
4448 + else
4449 + fputs ("lo", stream);
4450 + break;
4451 + case GE:
4452 + if (code == 'i')
4453 + fputs ("lt", stream);
4454 + else
4455 + fputs ("ge", stream);
4456 + break;
4457 + case GEU:
4458 + if (code == 'i')
4459 + fputs ("lo", stream);
4460 + else
4461 + fputs ("hs", stream);
4462 + break;
4463 + case LE:
4464 + if (code == 'i')
4465 + fputs ("gt", stream);
4466 + else
4467 + fputs ("le", stream);
4468 + break;
4469 + case LEU:
4470 + if (code == 'i')
4471 + fputs ("hi", stream);
4472 + else
4473 + fputs ("ls", stream);
4474 + break;
4475 + case CONST_INT:
4476 + {
4477 + int value = INTVAL (x);
4478 +
4479 + if (code == 'i')
4480 + {
4481 + value++;
4482 + }
4483 +
4484 + if (code == 'p')
4485 + {
4486 + /* Set to bit position of first bit set in immediate */
4487 + int i, bitpos = 32;
4488 + for (i = 0; i < 32; i++)
4489 + if (value & (1 << i))
4490 + {
4491 + bitpos = i;
4492 + break;
4493 + }
4494 + value = bitpos;
4495 + }
4496 +
4497 + if (code == 'r')
4498 + {
4499 + /* Reglist 8 */
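+	      /* Each bit of the immediate selects a group: bit 0 = r0-r3,
+		 bit 1 = r4-r7, bit 2 = r8-r9, bit 3 = r10, bit 4 = r11,
+		 bit 5 = r12, bit 6 = lr and bit 7 = pc.  For example, the
+		 value 0xc0 prints "lr, pc".  */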
4500 + char op[50];
4501 + op[0] = '\0';
4502 +
4503 +	  if (value & 0x01)
4504 +	    strcat (op, "r0-r3");
4505 +	  if (value & 0x02)
4506 +	    strcat (op, strlen (op) ? ", r4-r7" : "r4-r7");
4507 +	  if (value & 0x04)
4508 +	    strcat (op, strlen (op) ? ", r8-r9" : "r8-r9");
4509 +	  if (value & 0x08)
4510 +	    strcat (op, strlen (op) ? ", r10" : "r10");
4511 +	  if (value & 0x10)
4512 +	    strcat (op, strlen (op) ? ", r11" : "r11");
4513 +	  if (value & 0x20)
4514 +	    strcat (op, strlen (op) ? ", r12" : "r12");
4515 +	  if (value & 0x40)
4516 +	    strcat (op, strlen (op) ? ", lr" : "lr");
4517 +	  if (value & 0x80)
4518 +	    strcat (op, strlen (op) ? ", pc" : "pc");
4521 +
4522 + fputs (op, stream);
4523 + }
4524 + else if (code == 's')
4525 + {
4526 + /* Reglist 16 */
4527 + char reglist16_string[100];
4528 + int i;
4529 + reglist16_string[0] = '\0';
4530 +
4531 + for (i = 0; i < 16; ++i)
4532 + {
4533 +		  if (value & (1 << i))
4534 +		    {
4535 +		      if (strlen (reglist16_string))
4536 +			strcat (reglist16_string, ", ");
4537 +		      strcat (reglist16_string,
4538 +			      reg_names[INTERNAL_REGNUM (i)]);
4539 +		    }
4544 + }
4545 + fputs (reglist16_string, stream);
4546 + }
4547 + else if (code == 'C')
4548 + {
4549 + /* RegListCP8 */
4550 + char reglist_string[100];
4551 + avr32_make_fp_reglist_w (value, (char *) reglist_string);
4552 + fputs (reglist_string, stream);
4553 + }
4554 + else if (code == 'D')
4555 + {
4556 + /* RegListCPD8 */
4557 + char reglist_string[100];
4558 + avr32_make_fp_reglist_d (value, (char *) reglist_string);
4559 + fputs (reglist_string, stream);
4560 + }
4561 + else if (code == 'd')
4562 + {
4563 + /* Print in decimal format */
4564 + fprintf (stream, "%d", value);
4565 + }
4566 + else if (code == 'h')
4567 + {
4568 + /* Print halfword part of word */
4569 + fputs (value ? "b" : "t", stream);
4570 + }
4571 + else
4572 + {
4573 + /* Normal constant */
4574 + fprintf (stream, "%d", value);
4575 + }
4576 + break;
4577 + }
4578 + case CONST_DOUBLE:
4579 + {
4580 + HOST_WIDE_INT hi, lo;
4581 + if (GET_MODE (x) == DImode)
4582 + {
4583 + hi = CONST_DOUBLE_HIGH (x);
4584 + lo = CONST_DOUBLE_LOW (x);
4585 + }
4586 + else
4587 + {
4588 + HOST_WIDE_INT target_float[2];
4589 + hi = lo = 0;
4590 + real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (x),
4591 + GET_MODE (x));
4592 + /* For doubles the most significant part starts at index 0. */
4593 + if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
4594 + {
4595 + hi = target_float[0];
4596 + lo = target_float[1];
4597 + }
4598 + else
4599 + {
4600 + lo = target_float[0];
4601 + }
4602 + }
4603 +
4604 + if (avr32_const_ok_for_constraint_p (lo, 'K', "Ks21")
4605 + && ((GET_MODE (x) == SFmode)
4606 + || avr32_const_ok_for_constraint_p (hi, 'K', "Ks21")))
4607 + {
4608 + if (code == 'm')
4609 + fprintf (stream, "%ld", hi);
4610 + else
4611 + fprintf (stream, "%ld", lo);
4612 + }
4613 + else
4614 + {
4615 + fprintf (stream, "value too large");
4616 + }
4617 + break;
4618 + }
4619 + case CONST:
4620 + output_addr_const (stream, XEXP (XEXP (x, 0), 0));
4621 + fprintf (stream, "+%ld", INTVAL (XEXP (XEXP (x, 0), 1)));
4622 + break;
4623 + case REG:
4624 + /* Swap register name if the register is DImode or DFmode. */
4625 + if (GET_MODE (x) == DImode || GET_MODE (x) == DFmode)
4626 + {
4627 + /* Double register must have an even numbered address */
4628 + gcc_assert (!(REGNO (x) % 2));
4629 + if (code == 'm')
4630 + fputs (reg_names[true_regnum (x)], stream);
4631 + else
4632 + fputs (reg_names[true_regnum (x) + 1], stream);
4633 + }
4634 + else if (GET_MODE (x) == TImode)
4635 + {
4636 + switch (code)
4637 + {
4638 + case 'T':
4639 + fputs (reg_names[true_regnum (x)], stream);
4640 + break;
4641 + case 'U':
4642 + fputs (reg_names[true_regnum (x) + 1], stream);
4643 + break;
4644 + case 'L':
4645 + fputs (reg_names[true_regnum (x) + 2], stream);
4646 + break;
4647 + case 'B':
4648 + fputs (reg_names[true_regnum (x) + 3], stream);
4649 + break;
4650 + default:
4651 + fprintf (stream, "%s, %s, %s, %s",
4652 + reg_names[true_regnum (x) + 3],
4653 + reg_names[true_regnum (x) + 2],
4654 + reg_names[true_regnum (x) + 1],
4655 + reg_names[true_regnum (x)]);
4656 + break;
4657 + }
4658 + }
4659 + else
4660 + {
4661 + fputs (reg_names[true_regnum (x)], stream);
4662 + }
4663 + break;
4664 + case CODE_LABEL:
4665 + case LABEL_REF:
4666 + case SYMBOL_REF:
4667 + output_addr_const (stream, x);
4668 + break;
4669 + case MEM:
4670 + switch (GET_CODE (XEXP (x, 0)))
4671 + {
4672 + case LABEL_REF:
4673 + case SYMBOL_REF:
4674 + output_addr_const (stream, XEXP (x, 0));
4675 + break;
4676 + case MEM:
4677 + switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
4678 + {
4679 + case SYMBOL_REF:
4680 + output_addr_const (stream, XEXP (XEXP (x, 0), 0));
4681 + break;
4682 + default:
4683 + error = 1;
4684 + break;
4685 + }
4686 + break;
4687 + case REG:
4688 + avr32_print_operand (stream, XEXP (x, 0), 0);
4689 + if (code != 'p')
4690 + fputs ("[0]", stream);
4691 + break;
4692 + case PRE_DEC:
4693 + fputs ("--", stream);
4694 + avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
4695 + break;
4696 + case POST_INC:
4697 + avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
4698 + fputs ("++", stream);
4699 + break;
4700 + case PLUS:
4701 + {
4702 + rtx op0 = XEXP (XEXP (x, 0), 0);
4703 + rtx op1 = XEXP (XEXP (x, 0), 1);
4704 + rtx base = NULL_RTX, offset = NULL_RTX;
4705 +
4706 + if (avr32_address_register_rtx_p (op0, 1))
4707 + {
4708 + base = op0;
4709 + offset = op1;
4710 + }
4711 + else if (avr32_address_register_rtx_p (op1, 1))
4712 + {
4713 + /* Operands are switched. */
4714 + base = op1;
4715 + offset = op0;
4716 + }
4717 +
4718 + gcc_assert (base && offset
4719 + && avr32_address_register_rtx_p (base, 1)
4720 + && avr32_legitimate_index_p (GET_MODE (x), offset,
4721 + 1));
4722 +
4723 + avr32_print_operand (stream, base, 0);
4724 + fputs ("[", stream);
4725 + avr32_print_operand (stream, offset, 0);
4726 + fputs ("]", stream);
4727 + break;
4728 + }
4729 + case CONST:
4730 + output_addr_const (stream, XEXP (XEXP (XEXP (x, 0), 0), 0));
4731 + fprintf (stream, " + %ld",
4732 + INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)));
4733 + break;
4734 + default:
4735 + error = 1;
4736 + }
4737 + break;
4738 + case MULT:
4739 + {
4740 + int value = INTVAL (XEXP (x, 1));
4741 +
4742 + /* Convert immediate in multiplication into a shift immediate */
4743 + switch (value)
4744 + {
4745 + case 2:
4746 + value = 1;
4747 + break;
4748 + case 4:
4749 + value = 2;
4750 + break;
4751 + case 8:
4752 + value = 3;
4753 + break;
4754 + default:
4755 + value = 0;
4756 + }
4757 + fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
4758 + value);
4759 + break;
4760 + }
4761 + case ASHIFT:
4762 + if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4763 + fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
4764 + (int) INTVAL (XEXP (x, 1)));
4765 + else if (REG_P (XEXP (x, 1)))
4766 + fprintf (stream, "%s << %s", reg_names[true_regnum (XEXP (x, 0))],
4767 + reg_names[true_regnum (XEXP (x, 1))]);
4768 + else
4769 + {
4770 + error = 1;
4771 + }
4772 + break;
4773 + case LSHIFTRT:
4774 + if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4775 + fprintf (stream, "%s >> %i", reg_names[true_regnum (XEXP (x, 0))],
4776 + (int) INTVAL (XEXP (x, 1)));
4777 + else if (REG_P (XEXP (x, 1)))
4778 + fprintf (stream, "%s >> %s", reg_names[true_regnum (XEXP (x, 0))],
4779 + reg_names[true_regnum (XEXP (x, 1))]);
4780 + else
4781 + {
4782 + error = 1;
4783 + }
4785 + break;
4786 + case PARALLEL:
4787 + {
4788 + /* Load store multiple */
4789 + int i;
4790 + int count = XVECLEN (x, 0);
4791 + int reglist16 = 0;
4792 + char reglist16_string[100];
4793 +
4794 + for (i = 0; i < count; ++i)
4795 + {
4796 + rtx vec_elm = XVECEXP (x, 0, i);
4797 +	  if (GET_CODE (vec_elm) != SET)
4798 + {
4799 + debug_rtx (vec_elm);
4800 + internal_error ("Unknown element in parallel expression!");
4801 + }
4802 +	  if (GET_CODE (XEXP (vec_elm, 0)) == REG)
4803 + {
4804 + /* Load multiple */
4805 + reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 0)));
4806 + }
4807 + else
4808 + {
4809 + /* Store multiple */
4810 + reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 1)));
4811 + }
4812 + }
4813 +
4814 + avr32_make_reglist16 (reglist16, reglist16_string);
4815 + fputs (reglist16_string, stream);
4816 +
4817 + break;
4818 + }
4819 +
4820 + default:
4821 + error = 1;
4822 + }
4823 +
4824 + if (error)
4825 + {
4826 + debug_rtx (x);
4827 + internal_error ("Illegal expression for avr32_print_operand");
4828 + }
4829 +}
4830 +
4831 +rtx
4832 +avr32_get_note_reg_equiv (rtx insn)
4833 +{
4834 + rtx note;
4835 +
4836 + note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
4837 +
4838 + if (note != NULL_RTX)
4839 + return XEXP (note, 0);
4840 + else
4841 + return NULL_RTX;
4842 +}
4843 +
4844 +/*
4845 + Outputs to stdio stream stream the assembler syntax for an instruction
4846 + operand that is a memory reference whose address is x. x is an RTL
4847 + expression.
4848 +
4849 + ToDo: fixme.
4850 +*/
4851 +void
4852 +avr32_print_operand_address (FILE * stream, rtx x)
4853 +{
4854 + fprintf (stream, "(%d) /* address */", REGNO (x));
4855 +}
4856 +
4857 +/* Return true if _GLOBAL_OFFSET_TABLE_ symbol is mentioned. */
4858 +bool
4859 +avr32_got_mentioned_p (rtx addr)
4860 +{
4861 + if (GET_CODE (addr) == MEM)
4862 + addr = XEXP (addr, 0);
4863 + while (GET_CODE (addr) == CONST)
4864 + addr = XEXP (addr, 0);
4865 + if (GET_CODE (addr) == SYMBOL_REF)
4866 + {
4867 + return streq (XSTR (addr, 0), "_GLOBAL_OFFSET_TABLE_");
4868 + }
4869 + if (GET_CODE (addr) == PLUS || GET_CODE (addr) == MINUS)
4870 + {
4871 + bool l1, l2;
4872 +
4873 + l1 = avr32_got_mentioned_p (XEXP (addr, 0));
4874 + l2 = avr32_got_mentioned_p (XEXP (addr, 1));
4875 + return l1 || l2;
4876 + }
4877 + return false;
4878 +}
4879 +
4880 +
4881 +/* Find the symbol in an address expression. */
4882 +
4883 +rtx
4884 +avr32_find_symbol (rtx addr)
4885 +{
4886 + if (GET_CODE (addr) == MEM)
4887 + addr = XEXP (addr, 0);
4888 +
4889 + while (GET_CODE (addr) == CONST)
4890 + addr = XEXP (addr, 0);
4891 +
4892 + if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
4893 + return addr;
4894 + if (GET_CODE (addr) == PLUS)
4895 + {
4896 + rtx l1, l2;
4897 +
4898 + l1 = avr32_find_symbol (XEXP (addr, 0));
4899 + l2 = avr32_find_symbol (XEXP (addr, 1));
4900 + if (l1 != NULL_RTX && l2 == NULL_RTX)
4901 + return l1;
4902 + else if (l1 == NULL_RTX && l2 != NULL_RTX)
4903 + return l2;
4904 + }
4905 +
4906 + return NULL_RTX;
4907 +}
4908 +
4909 +
4910 +/* Routines for manipulation of the constant pool. */
4911 +
4912 +/* AVR32 instructions cannot load a large constant directly into a
4913 + register; they have to come from a pc relative load. The constant
4914 + must therefore be placed in the addressable range of the pc
4915 + relative load. Depending on the precise pc relative load
4916 + instruction the range is somewhere between 256 bytes and 4k. This
4917 + means that we often have to dump a constant inside a function, and
4918 + generate code to branch around it.
4919 +
4920 + It is important to minimize this, since the branches will slow
4921 + things down and make the code larger.
4922 +
4923 + Normally we can hide the table after an existing unconditional
4924 + branch so that there is no interruption of the flow, but in the
4925 + worst case the code looks like this:
4926 +
4927 + lddpc rn, L1
4928 + ...
4929 + rjmp L2
4930 + align
4931 + L1: .long value
4932 + L2:
4933 + ...
4934 +
4935 + lddpc rn, L3
4936 + ...
4937 + rjmp L4
4938 + align
4939 + L3: .long value
4940 + L4:
4941 + ...
4942 +
4943 + We fix this by performing a scan after scheduling, which notices
4944 + which instructions need to have their operands fetched from the
4945 + constant table and builds the table.
4946 +
4947 + The algorithm starts by building a table of all the constants that
4948 + need fixing up and all the natural barriers in the function (places
4949 + where a constant table can be dropped without breaking the flow).
4950 + For each fixup we note how far the pc-relative replacement will be
4951 + able to reach and the offset of the instruction into the function.
4952 +
4953 + Having built the table we then group the fixes together to form
4954 + tables that are as large as possible (subject to addressing
4955 + constraints) and emit each table of constants after the last
4956 + barrier that is within range of all the instructions in the group.
4957 + If a group does not contain a barrier, then we forcibly create one
4958 + by inserting a jump instruction into the flow. Once the table has
4959 + been inserted, the insns are then modified to reference the
4960 + relevant entry in the pool.
4961 +
4962 + Possible enhancements to the algorithm (not implemented) are:
4963 +
4964 + 1) For some processors and object formats, there may be benefit in
4965 + aligning the pools to the start of cache lines; this alignment
4966 + would need to be taken into account when calculating addressability
4967 + of a pool. */
4968 +
4969 +/* These typedefs are located at the start of this file, so that
4970 + they can be used in the prototypes there. This comment is to
4971 + remind readers of that fact so that the following structures
4972 + can be understood more easily.
4973 +
4974 + typedef struct minipool_node Mnode;
4975 + typedef struct minipool_fixup Mfix; */
4976 +
4977 +struct minipool_node
4978 +{
4979 + /* Doubly linked chain of entries. */
4980 + Mnode *next;
4981 + Mnode *prev;
4982 + /* The maximum offset into the code that this entry can be placed. While
4983 + pushing fixes for forward references, all entries are sorted in order of
4984 + increasing max_address. */
4985 + HOST_WIDE_INT max_address;
4986 + /* Similarly for an entry inserted for a backwards ref. */
4987 + HOST_WIDE_INT min_address;
4988 + /* The number of fixes referencing this entry. This can become zero if we
4989 + "unpush" an entry. In this case we ignore the entry when we come to
4990 + emit the code. */
4991 + int refcount;
4992 + /* The offset from the start of the minipool. */
4993 + HOST_WIDE_INT offset;
4994 + /* The value in table. */
4995 + rtx value;
4996 + /* The mode of value. */
4997 + enum machine_mode mode;
4998 + /* The size of the value. */
4999 + int fix_size;
5000 +};
5001 +
5002 +struct minipool_fixup
5003 +{
5004 + Mfix *next;
5005 + rtx insn;
5006 + HOST_WIDE_INT address;
5007 + rtx *loc;
5008 + enum machine_mode mode;
5009 + int fix_size;
5010 + rtx value;
5011 + Mnode *minipool;
5012 + HOST_WIDE_INT forwards;
5013 + HOST_WIDE_INT backwards;
5014 +};
5015 +
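+/* A fix for an insn at ADDRESS with forward reach FORWARDS constrains its
+   pool entry to lie below ADDRESS + FORWARDS; entries are kept sorted by
+   increasing max_address so the most constrained entry comes first.  */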
5016 +
5017 +/* Fixes less than a word need padding out to a word boundary. */
5018 +#define MINIPOOL_FIX_SIZE(mode, value) \
5019 + (IS_FORCE_MINIPOOL(value) ? 0 : \
5020 + (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4))
5021 +
5022 +#define IS_FORCE_MINIPOOL(x) \
5023 + (GET_CODE(x) == UNSPEC && \
5024 + XINT(x, 1) == UNSPEC_FORCE_MINIPOOL)
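+/* For example, an SImode or SFmode entry occupies 4 bytes and a DImode or
+   DFmode entry 8 bytes, while a force-minipool marker contributes no data.  */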
5025 +
5026 +static Mnode *minipool_vector_head;
5027 +static Mnode *minipool_vector_tail;
5028 +
5029 +/* The linked list of all minipool fixes required for this function. */
5030 +Mfix *minipool_fix_head;
5031 +Mfix *minipool_fix_tail;
5032 +/* The fix entry for the current minipool, once it has been placed. */
5033 +Mfix *minipool_barrier;
5034 +
5035 +/* Determines if INSN is the start of a jump table. Returns the end
5036 + of the TABLE or NULL_RTX. */
5037 +static rtx
5038 +is_jump_table (rtx insn)
5039 +{
5040 + rtx table;
5041 +
5042 + if (GET_CODE (insn) == JUMP_INSN
5043 + && JUMP_LABEL (insn) != NULL
5044 + && ((table = next_real_insn (JUMP_LABEL (insn)))
5045 + == next_real_insn (insn))
5046 + && table != NULL
5047 + && GET_CODE (table) == JUMP_INSN
5048 + && (GET_CODE (PATTERN (table)) == ADDR_VEC
5049 + || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
5050 + return table;
5051 +
5052 + return NULL_RTX;
5053 +}
5054 +
5055 +static HOST_WIDE_INT
5056 +get_jump_table_size (rtx insn)
5057 +{
5058 +  /* ADDR_VECs only take room if read-only data goes into the text section. */
5059 + if (JUMP_TABLES_IN_TEXT_SECTION
5060 +#if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
5061 + || 1
5062 +#endif
5063 + )
5064 + {
5065 + rtx body = PATTERN (insn);
5066 + int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
5067 +
5068 + return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
5069 + }
5070 +
5071 + return 0;
5072 +}
5073 +
5074 +/* Move a minipool fix MP from its current location to before MAX_MP.
5075 + If MAX_MP is NULL, then MP doesn't need moving, but the addressing
5076 + constraints may need updating. */
5077 +static Mnode *
5078 +move_minipool_fix_forward_ref (Mnode * mp, Mnode * max_mp,
5079 + HOST_WIDE_INT max_address)
5080 +{
5081 + /* This should never be true and the code below assumes these are
5082 + different. */
5083 + if (mp == max_mp)
5084 + abort ();
5085 +
5086 + if (max_mp == NULL)
5087 + {
5088 + if (max_address < mp->max_address)
5089 + mp->max_address = max_address;
5090 + }
5091 + else
5092 + {
5093 + if (max_address > max_mp->max_address - mp->fix_size)
5094 + mp->max_address = max_mp->max_address - mp->fix_size;
5095 + else
5096 + mp->max_address = max_address;
5097 +
5098 + /* Unlink MP from its current position. Since max_mp is non-null,
5099 + mp->prev must be non-null. */
5100 + mp->prev->next = mp->next;
5101 + if (mp->next != NULL)
5102 + mp->next->prev = mp->prev;
5103 + else
5104 + minipool_vector_tail = mp->prev;
5105 +
5106 + /* Re-insert it before MAX_MP. */
5107 + mp->next = max_mp;
5108 + mp->prev = max_mp->prev;
5109 + max_mp->prev = mp;
5110 +
5111 + if (mp->prev != NULL)
5112 + mp->prev->next = mp;
5113 + else
5114 + minipool_vector_head = mp;
5115 + }
5116 +
5117 + /* Save the new entry. */
5118 + max_mp = mp;
5119 +
5120 + /* Scan over the preceding entries and adjust their addresses as required.
5121 + */
5122 + while (mp->prev != NULL
5123 + && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
5124 + {
5125 + mp->prev->max_address = mp->max_address - mp->prev->fix_size;
5126 + mp = mp->prev;
5127 + }
5128 +
5129 + return max_mp;
5130 +}
5131 +
5132 +/* Add a constant to the minipool for a forward reference. Returns the
5133 + node added or NULL if the constant will not fit in this pool. */
5134 +static Mnode *
5135 +add_minipool_forward_ref (Mfix * fix)
5136 +{
5137 + /* If set, max_mp is the first pool_entry that has a lower constraint than
5138 + the one we are trying to add. */
5139 + Mnode *max_mp = NULL;
5140 + HOST_WIDE_INT max_address = fix->address + fix->forwards;
5141 + Mnode *mp;
5142 +
5143 + /* If this fix's address is greater than the address of the first entry,
5144 + then we can't put the fix in this pool. We subtract the size of the
5145 + current fix to ensure that if the table is fully packed we still have
5146 +     enough room to insert this value by shuffling the other fixes forwards. */
5147 + if (minipool_vector_head &&
5148 + fix->address >= minipool_vector_head->max_address - fix->fix_size)
5149 + return NULL;
5150 +
5151 + /* Scan the pool to see if a constant with the same value has already been
5152 + added. While we are doing this, also note the location where we must
5153 + insert the constant if it doesn't already exist. */
5154 + for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
5155 + {
5156 + if (GET_CODE (fix->value) == GET_CODE (mp->value)
5157 + && fix->mode == mp->mode
5158 + && (GET_CODE (fix->value) != CODE_LABEL
5159 + || (CODE_LABEL_NUMBER (fix->value)
5160 + == CODE_LABEL_NUMBER (mp->value)))
5161 + && rtx_equal_p (fix->value, mp->value))
5162 + {
5163 + /* More than one fix references this entry. */
5164 + mp->refcount++;
5165 + return move_minipool_fix_forward_ref (mp, max_mp, max_address);
5166 + }
5167 +
5168 + /* Note the insertion point if necessary. */
5169 + if (max_mp == NULL && mp->max_address > max_address)
5170 + max_mp = mp;
5171 +
5172 + }
5173 +
5174 + /* The value is not currently in the minipool, so we need to create a new
5175 + entry for it. If MAX_MP is NULL, the entry will be put on the end of
5176 + the list since the placement is less constrained than any existing
5177 + entry. Otherwise, we insert the new fix before MAX_MP and, if
5178 + necessary, adjust the constraints on the other entries. */
5179 + mp = xmalloc (sizeof (*mp));
5180 + mp->fix_size = fix->fix_size;
5181 + mp->mode = fix->mode;
5182 + mp->value = fix->value;
5183 + mp->refcount = 1;
5184 + /* Not yet required for a backwards ref. */
5185 + mp->min_address = -65536;
5186 +
5187 + if (max_mp == NULL)
5188 + {
5189 + mp->max_address = max_address;
5190 + mp->next = NULL;
5191 + mp->prev = minipool_vector_tail;
5192 +
5193 + if (mp->prev == NULL)
5194 + {
5195 + minipool_vector_head = mp;
5196 + minipool_vector_label = gen_label_rtx ();
5197 + }
5198 + else
5199 + mp->prev->next = mp;
5200 +
5201 + minipool_vector_tail = mp;
5202 + }
5203 + else
5204 + {
5205 + if (max_address > max_mp->max_address - mp->fix_size)
5206 + mp->max_address = max_mp->max_address - mp->fix_size;
5207 + else
5208 + mp->max_address = max_address;
5209 +
5210 + mp->next = max_mp;
5211 + mp->prev = max_mp->prev;
5212 + max_mp->prev = mp;
5213 + if (mp->prev != NULL)
5214 + mp->prev->next = mp;
5215 + else
5216 + minipool_vector_head = mp;
5217 + }
5218 +
5219 + /* Save the new entry. */
5220 + max_mp = mp;
5221 +
5222 + /* Scan over the preceding entries and adjust their addresses as required.
5223 + */
5224 + while (mp->prev != NULL
5225 + && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
5226 + {
5227 + mp->prev->max_address = mp->max_address - mp->prev->fix_size;
5228 + mp = mp->prev;
5229 + }
5230 +
5231 + return max_mp;
5232 +}
5233 +
5234 +static Mnode *
5235 +move_minipool_fix_backward_ref (Mnode * mp, Mnode * min_mp,
5236 + HOST_WIDE_INT min_address)
5237 +{
5238 + HOST_WIDE_INT offset;
5239 +
5240 + /* This should never be true, and the code below assumes these are
5241 + different. */
5242 + if (mp == min_mp)
5243 + abort ();
5244 +
5245 + if (min_mp == NULL)
5246 + {
5247 + if (min_address > mp->min_address)
5248 + mp->min_address = min_address;
5249 + }
5250 + else
5251 + {
5252 + /* We will adjust this below if it is too loose. */
5253 + mp->min_address = min_address;
5254 +
5255 + /* Unlink MP from its current position. Since min_mp is non-null,
5256 + mp->next must be non-null. */
5257 + mp->next->prev = mp->prev;
5258 + if (mp->prev != NULL)
5259 + mp->prev->next = mp->next;
5260 + else
5261 + minipool_vector_head = mp->next;
5262 +
5263 + /* Reinsert it after MIN_MP. */
5264 + mp->prev = min_mp;
5265 + mp->next = min_mp->next;
5266 + min_mp->next = mp;
5267 + if (mp->next != NULL)
5268 + mp->next->prev = mp;
5269 + else
5270 + minipool_vector_tail = mp;
5271 + }
5272 +
5273 + min_mp = mp;
5274 +
5275 + offset = 0;
5276 + for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
5277 + {
5278 + mp->offset = offset;
5279 + if (mp->refcount > 0)
5280 + offset += mp->fix_size;
5281 +
5282 + if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
5283 + mp->next->min_address = mp->min_address + mp->fix_size;
5284 + }
5285 +
5286 + return min_mp;
5287 +}
5288 +
5289 +/* Add a constant to the minipool for a backward reference. Returns the
5290 + node added or NULL if the constant will not fit in this pool.
5291 +
5292 + Note that the code for insertion for a backwards reference can be
5293 + somewhat confusing because the calculated offsets for each fix do
5294 + not take into account the size of the pool (which is still under
5295 +   construction). */
5296 +static Mnode *
5297 +add_minipool_backward_ref (Mfix * fix)
5298 +{
5299 + /* If set, min_mp is the last pool_entry that has a lower constraint than
5300 + the one we are trying to add. */
5301 + Mnode *min_mp = NULL;
5302 + /* This can be negative, since it is only a constraint. */
5303 + HOST_WIDE_INT min_address = fix->address - fix->backwards;
5304 + Mnode *mp;
5305 +
5306 + /* If we can't reach the current pool from this insn, or if we can't insert
5307 + this entry at the end of the pool without pushing other fixes out of
5308 + range, then we don't try. This ensures that we can't fail later on. */
5309 + if (min_address >= minipool_barrier->address
5310 + || (minipool_vector_tail->min_address + fix->fix_size
5311 + >= minipool_barrier->address))
5312 + return NULL;
5313 +
5314 + /* Scan the pool to see if a constant with the same value has already been
5315 + added. While we are doing this, also note the location where we must
5316 + insert the constant if it doesn't already exist. */
5317 + for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
5318 + {
5319 + if (GET_CODE (fix->value) == GET_CODE (mp->value)
5320 + && fix->mode == mp->mode
5321 + && (GET_CODE (fix->value) != CODE_LABEL
5322 + || (CODE_LABEL_NUMBER (fix->value)
5323 + == CODE_LABEL_NUMBER (mp->value)))
5324 + && rtx_equal_p (fix->value, mp->value)
5325 + /* Check that there is enough slack to move this entry to the end
5326 + of the table (this is conservative). */
5327 + && (mp->max_address
5328 + > (minipool_barrier->address
5329 + + minipool_vector_tail->offset
5330 + + minipool_vector_tail->fix_size)))
5331 + {
5332 + mp->refcount++;
5333 + return move_minipool_fix_backward_ref (mp, min_mp, min_address);
5334 + }
5335 +
5336 + if (min_mp != NULL)
5337 + mp->min_address += fix->fix_size;
5338 + else
5339 + {
5340 + /* Note the insertion point if necessary. */
5341 + if (mp->min_address < min_address)
5342 + {
5343 + min_mp = mp;
5344 + }
5345 + else if (mp->max_address
5346 + < minipool_barrier->address + mp->offset + fix->fix_size)
5347 + {
5348 + /* Inserting before this entry would push the fix beyond its
5349 + maximum address (which can happen if we have re-located a
5350 + forwards fix); force the new fix to come after it. */
5351 + min_mp = mp;
5352 + min_address = mp->min_address + fix->fix_size;
5353 + }
5354 + }
5355 + }
5356 +
5357 + /* We need to create a new entry. */
5358 + mp = xmalloc (sizeof (*mp));
5359 + mp->fix_size = fix->fix_size;
5360 + mp->mode = fix->mode;
5361 + mp->value = fix->value;
5362 + mp->refcount = 1;
5363 + mp->max_address = minipool_barrier->address + 65536;
5364 +
5365 + mp->min_address = min_address;
5366 +
5367 + if (min_mp == NULL)
5368 + {
5369 + mp->prev = NULL;
5370 + mp->next = minipool_vector_head;
5371 +
5372 + if (mp->next == NULL)
5373 + {
5374 + minipool_vector_tail = mp;
5375 + minipool_vector_label = gen_label_rtx ();
5376 + }
5377 + else
5378 + mp->next->prev = mp;
5379 +
5380 + minipool_vector_head = mp;
5381 + }
5382 + else
5383 + {
5384 + mp->next = min_mp->next;
5385 + mp->prev = min_mp;
5386 + min_mp->next = mp;
5387 +
5388 + if (mp->next != NULL)
5389 + mp->next->prev = mp;
5390 + else
5391 + minipool_vector_tail = mp;
5392 + }
5393 +
5394 + /* Save the new entry. */
5395 + min_mp = mp;
5396 +
5397 + if (mp->prev)
5398 + mp = mp->prev;
5399 + else
5400 + mp->offset = 0;
5401 +
5402 + /* Scan over the following entries and adjust their offsets. */
5403 + while (mp->next != NULL)
5404 + {
5405 + if (mp->next->min_address < mp->min_address + mp->fix_size)
5406 + mp->next->min_address = mp->min_address + mp->fix_size;
5407 +
5408 + if (mp->refcount)
5409 + mp->next->offset = mp->offset + mp->fix_size;
5410 + else
5411 + mp->next->offset = mp->offset;
5412 +
5413 + mp = mp->next;
5414 + }
5415 +
5416 + return min_mp;
5417 +}
5418 +
5419 +static void
5420 +assign_minipool_offsets (Mfix * barrier)
5421 +{
5422 + HOST_WIDE_INT offset = 0;
5423 + Mnode *mp;
5424 +
5425 + minipool_barrier = barrier;
5426 +
5427 + for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
5428 + {
5429 + mp->offset = offset;
5430 +
5431 + if (mp->refcount > 0)
5432 + offset += mp->fix_size;
5433 + }
5434 +}
5435 +
5436 +/* Print a symbolic form of X to the debug file, F. */
5437 +static void
5438 +avr32_print_value (FILE * f, rtx x)
5439 +{
5440 + switch (GET_CODE (x))
5441 + {
5442 + case CONST_INT:
5443 + fprintf (f, "0x%x", (int) INTVAL (x));
5444 + return;
5445 +
5446 + case CONST_DOUBLE:
5447 + fprintf (f, "<0x%lx,0x%lx>", (long) XWINT (x, 2), (long) XWINT (x, 3));
5448 + return;
5449 +
5450 + case CONST_VECTOR:
5451 + {
5452 + int i;
5453 +
5454 + fprintf (f, "<");
5455 + for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
5456 + {
5457 + fprintf (f, "0x%x", (int) INTVAL (CONST_VECTOR_ELT (x, i)));
5458 + if (i < (CONST_VECTOR_NUNITS (x) - 1))
5459 + fputc (',', f);
5460 + }
5461 + fprintf (f, ">");
5462 + }
5463 + return;
5464 +
5465 + case CONST_STRING:
5466 + fprintf (f, "\"%s\"", XSTR (x, 0));
5467 + return;
5468 +
5469 + case SYMBOL_REF:
5470 + fprintf (f, "`%s'", XSTR (x, 0));
5471 + return;
5472 +
5473 + case LABEL_REF:
5474 + fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
5475 + return;
5476 +
5477 + case CONST:
5478 + avr32_print_value (f, XEXP (x, 0));
5479 + return;
5480 +
5481 + case PLUS:
5482 + avr32_print_value (f, XEXP (x, 0));
5483 + fprintf (f, "+");
5484 + avr32_print_value (f, XEXP (x, 1));
5485 + return;
5486 +
5487 + case PC:
5488 + fprintf (f, "pc");
5489 + return;
5490 +
5491 + default:
5492 + fprintf (f, "????");
5493 + return;
5494 + }
5495 +}
5496 +
5497 +int
5498 +is_minipool_label (rtx label)
5499 +{
5500 + minipool_labels *cur_mp_label = cfun->machine->minipool_label_head;
5501 +
5502 + if (GET_CODE (label) != CODE_LABEL)
5503 + return FALSE;
5504 +
5505 + while (cur_mp_label)
5506 + {
5507 + if (CODE_LABEL_NUMBER (label)
5508 + == CODE_LABEL_NUMBER (cur_mp_label->label))
5509 + return TRUE;
5510 + cur_mp_label = cur_mp_label->next;
5511 + }
5512 + return FALSE;
5513 +}
5514 +
5515 +static void
5516 +new_minipool_label (rtx label)
5517 +{
5518 + if (!cfun->machine->minipool_label_head)
5519 + {
5520 + cfun->machine->minipool_label_head =
5521 + ggc_alloc (sizeof (minipool_labels));
5522 + cfun->machine->minipool_label_tail = cfun->machine->minipool_label_head;
5523 + cfun->machine->minipool_label_head->label = label;
5524 + cfun->machine->minipool_label_head->next = 0;
5525 + cfun->machine->minipool_label_head->prev = 0;
5526 + }
5527 + else
5528 + {
5529 + cfun->machine->minipool_label_tail->next =
5530 + ggc_alloc (sizeof (minipool_labels));
5531 + cfun->machine->minipool_label_tail->next->label = label;
5532 + cfun->machine->minipool_label_tail->next->next = 0;
5533 + cfun->machine->minipool_label_tail->next->prev =
5534 + cfun->machine->minipool_label_tail;
5535 + cfun->machine->minipool_label_tail =
5536 + cfun->machine->minipool_label_tail->next;
5537 + }
5538 +}
5539 +
5540 +/* Output the literal table */
5541 +static void
5542 +dump_minipool (rtx scan)
5543 +{
5544 + Mnode *mp;
5545 + Mnode *nmp;
5546 +
5547 + if (dump_file)
5548 + fprintf (dump_file,
5549 + ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
5550 + INSN_UID (scan), (unsigned long) minipool_barrier->address, 4);
5551 +
5552 + scan = emit_insn_after (gen_consttable_start (), scan);
5553 + scan = emit_insn_after (gen_align_4 (), scan);
5554 + scan = emit_label_after (minipool_vector_label, scan);
5555 + new_minipool_label (minipool_vector_label);
5556 +
5557 + for (mp = minipool_vector_head; mp != NULL; mp = nmp)
5558 + {
5559 + if (mp->refcount > 0)
5560 + {
5561 + if (dump_file)
5562 + {
5563 + fprintf (dump_file,
5564 + ";; Offset %u, min %ld, max %ld ",
5565 + (unsigned) mp->offset, (unsigned long) mp->min_address,
5566 + (unsigned long) mp->max_address);
5567 + avr32_print_value (dump_file, mp->value);
5568 + fputc ('\n', dump_file);
5569 + }
5570 +
5571 + switch (mp->fix_size)
5572 + {
5573 +#ifdef HAVE_consttable_4
5574 + case 4:
5575 + scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
5576 + break;
5577 +
5578 +#endif
5579 +#ifdef HAVE_consttable_8
5580 + case 8:
5581 + scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
5582 + break;
5583 +
5584 +#endif
5585 + case 0:
5586 + /* This can happen for force-minipool entries which are just
5587 + there to force the minipool to be generated. */
5588 + break;
5589 + default:
5590 + abort ();
5591 + break;
5592 + }
5593 + }
5594 +
5595 + nmp = mp->next;
5596 + free (mp);
5597 + }
5598 +
5599 + minipool_vector_head = minipool_vector_tail = NULL;
5600 + scan = emit_insn_after (gen_consttable_end (), scan);
5601 + scan = emit_barrier_after (scan);
5602 +}
5603 +
5604 +/* Return the cost of forcibly inserting a barrier after INSN. */
5605 +static int
5606 +avr32_barrier_cost (rtx insn)
5607 +{
5608 + /* Basing the location of the pool on the loop depth is preferable, but at
5609 + the moment, the basic block information seems to be corrupt by this
5610 + stage of the compilation. */
5611 + int base_cost = 50;
5612 + rtx next = next_nonnote_insn (insn);
5613 +
5614 + if (next != NULL && GET_CODE (next) == CODE_LABEL)
5615 + base_cost -= 20;
5616 +
5617 + switch (GET_CODE (insn))
5618 + {
5619 + case CODE_LABEL:
5620 + /* It will always be better to place the table before the label, rather
5621 + than after it. */
5622 + return 50;
5623 +
5624 + case INSN:
5625 + case CALL_INSN:
5626 + return base_cost;
5627 +
5628 + case JUMP_INSN:
5629 + return base_cost - 10;
5630 +
5631 + default:
5632 + return base_cost + 10;
5633 + }
5634 +}
5635 +
5636 +/* Find the best place in the insn stream in the range
5637 + (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
5638 + Create the barrier by inserting a jump and add a new fix entry for
5639 + it. */
5640 +static Mfix *
5641 +create_fix_barrier (Mfix * fix, HOST_WIDE_INT max_address)
5642 +{
5643 + HOST_WIDE_INT count = 0;
5644 + rtx barrier;
5645 + rtx from = fix->insn;
5646 + rtx selected = from;
5647 + int selected_cost;
5648 + HOST_WIDE_INT selected_address;
5649 + Mfix *new_fix;
5650 + HOST_WIDE_INT max_count = max_address - fix->address;
5651 + rtx label = gen_label_rtx ();
5652 +
5653 + selected_cost = avr32_barrier_cost (from);
5654 + selected_address = fix->address;
5655 +
5656 + while (from && count < max_count)
5657 + {
5658 + rtx tmp;
5659 + int new_cost;
5660 +
5661 + /* This code shouldn't have been called if there was a natural barrier
5662 + within range. */
5663 + if (GET_CODE (from) == BARRIER)
5664 + abort ();
5665 +
5666 + /* Count the length of this insn. */
5667 + count += get_attr_length (from);
5668 +
5669 + /* If there is a jump table, add its length. */
5670 + tmp = is_jump_table (from);
5671 + if (tmp != NULL)
5672 + {
5673 + count += get_jump_table_size (tmp);
5674 +
5675 + /* Jump tables aren't in a basic block, so base the cost on the
5676 + dispatch insn. If we select this location, we will still put
5677 + the pool after the table. */
5678 + new_cost = avr32_barrier_cost (from);
5679 +
5680 + if (count < max_count && new_cost <= selected_cost)
5681 + {
5682 + selected = tmp;
5683 + selected_cost = new_cost;
5684 + selected_address = fix->address + count;
5685 + }
5686 +
5687 + /* Continue after the dispatch table. */
5688 + from = NEXT_INSN (tmp);
5689 + continue;
5690 + }
5691 +
5692 + new_cost = avr32_barrier_cost (from);
5693 +
5694 + if (count < max_count && new_cost <= selected_cost)
5695 + {
5696 + selected = from;
5697 + selected_cost = new_cost;
5698 + selected_address = fix->address + count;
5699 + }
5700 +
5701 + from = NEXT_INSN (from);
5702 + }
5703 +
5704 + /* Create a new JUMP_INSN that branches around a barrier. */
5705 + from = emit_jump_insn_after (gen_jump (label), selected);
5706 + JUMP_LABEL (from) = label;
5707 + barrier = emit_barrier_after (from);
5708 + emit_label_after (label, barrier);
5709 +
5710 + /* Create a minipool barrier entry for the new barrier. */
5711 + new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*new_fix));
5712 + new_fix->insn = barrier;
5713 + new_fix->address = selected_address;
5714 + new_fix->next = fix->next;
5715 + fix->next = new_fix;
5716 +
5717 + return new_fix;
5718 +}
5719 +
5720 +/* Record that there is a natural barrier in the insn stream at
5721 + ADDRESS. */
5722 +static void
5723 +push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
5724 +{
5725 + Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
5726 +
5727 + fix->insn = insn;
5728 + fix->address = address;
5729 +
5730 + fix->next = NULL;
5731 + if (minipool_fix_head != NULL)
5732 + minipool_fix_tail->next = fix;
5733 + else
5734 + minipool_fix_head = fix;
5735 +
5736 + minipool_fix_tail = fix;
5737 +}
5738 +
5739 +/* Record INSN, which will need fixing up to load a value from the
5740 + minipool. ADDRESS is the offset of the insn since the start of the
5741 + function; LOC is a pointer to the part of the insn which requires
5742 + fixing; VALUE is the constant that must be loaded, which is of type
5743 + MODE. */
5744 +static void
5745 +push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx * loc,
5746 + enum machine_mode mode, rtx value)
5747 +{
5748 + Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
5749 + rtx body = PATTERN (insn);
5750 +
5751 + fix->insn = insn;
5752 + fix->address = address;
5753 + fix->loc = loc;
5754 + fix->mode = mode;
5755 + fix->fix_size = MINIPOOL_FIX_SIZE (mode, value);
5756 + fix->value = value;
5757 +
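+ /* Determine the addressing range of the instruction that will reference
+ the pool entry. In the comments below, KsNN/KuNN denote signed/unsigned
+ NN-bit displacements and the "<< 2" reflects word-scaled offsets. */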
5758 + if (GET_CODE (body) == PARALLEL)
5759 + {
5760 + /* Mcall : Ks16 << 2 */
5761 + fix->forwards = ((1 << 15) - 1) << 2;
5762 + fix->backwards = (1 << 15) << 2;
5763 + }
5764 + else if (GET_CODE (body) == SET
5765 + && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 4)
5766 + {
5767 + /* Word Load */
5768 + if (TARGET_HARD_FLOAT
5769 + && GET_MODE_CLASS (GET_MODE (SET_DEST (body))) == MODE_FLOAT)
5770 + {
5771 + /* Ldc0.w : Ku12 << 2 */
5772 + fix->forwards = ((1 << 12) - 1) << 2;
5773 + fix->backwards = 0;
5774 + }
5775 + else
5776 + {
5777 + if (optimize_size)
5778 + {
5779 + /* Lddpc : Ku7 << 2 */
5780 + fix->forwards = ((1 << 7) - 1) << 2;
5781 + fix->backwards = 0;
5782 + }
5783 + else
5784 + {
5785 + /* Ld.w : Ks16 */
5786 + fix->forwards = ((1 << 15) - 4);
5787 + fix->backwards = (1 << 15);
5788 + }
5789 + }
5790 + }
5791 + else if (GET_CODE (body) == SET
5792 + && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 8)
5793 + {
5794 + /* Double word load */
5795 + if (TARGET_HARD_FLOAT
5796 + && GET_MODE_CLASS (GET_MODE (SET_DEST (body))) == MODE_FLOAT)
5797 + {
5798 + /* Ldc0.d : Ku12 << 2 */
5799 + fix->forwards = ((1 << 12) - 1) << 2;
5800 + fix->backwards = 0;
5801 + }
5802 + else
5803 + {
5804 + /* Ld.d : Ks16 */
5805 + fix->forwards = ((1 << 15) - 4);
5806 + fix->backwards = (1 << 15);
5807 + }
5808 + }
5809 + else if (GET_CODE (body) == UNSPEC_VOLATILE
5810 + && XINT (body, 1) == VUNSPEC_MVRC)
5811 + {
5812 + /* Coprocessor load */
5813 + /* Ldc : Ku8 << 2 */
5814 + fix->forwards = ((1 << 8) - 1) << 2;
5815 + fix->backwards = 0;
5816 + }
5817 + else
5818 + {
5819 + /* Assume worst case which is lddpc insn. */
5820 + fix->forwards = ((1 << 7) - 1) << 2;
5821 + fix->backwards = 0;
5822 + }
5823 +
5824 + fix->minipool = NULL;
5825 +
5826 + /* If an insn doesn't have a range defined for it, then it isn't expecting
5827 + to be reworked by this code. Better to abort now than to generate duff
5828 + assembly code. */
5829 + if (fix->forwards == 0 && fix->backwards == 0)
5830 + abort ();
5831 +
5832 + if (dump_file)
5833 + {
5834 + fprintf (dump_file,
5835 + ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
5836 + GET_MODE_NAME (mode),
5837 + INSN_UID (insn), (unsigned long) address,
5838 + -1 * (long) fix->backwards, (long) fix->forwards);
5839 + avr32_print_value (dump_file, fix->value);
5840 + fprintf (dump_file, "\n");
5841 + }
5842 +
5843 + /* Add it to the chain of fixes. */
5844 + fix->next = NULL;
5845 +
5846 + if (minipool_fix_head != NULL)
5847 + minipool_fix_tail->next = fix;
5848 + else
5849 + minipool_fix_head = fix;
5850 +
5851 + minipool_fix_tail = fix;
5852 +}
5853 +
5854 +/* Scan INSN and note any of its operands that need fixing.
5855 + If DO_PUSHES is false we do not actually push any of the fixups
5856 + needed. The function returns TRUE if any fixups were needed/pushed.
5857 + This is used by avr32_memory_load_p() which needs to know about loads
5858 + of constants that will be converted into minipool loads. */
5859 +static bool
5860 +note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
5861 +{
5862 + bool result = false;
5863 + int opno;
5864 +
5865 + extract_insn (insn);
5866 +
5867 + if (!constrain_operands (1))
5868 + fatal_insn_not_found (insn);
5869 +
5870 + if (recog_data.n_alternatives == 0)
5871 + return false;
5872 +
5873 + /* Fill in recog_op_alt with information about the constraints of this
5874 + insn. */
5875 + preprocess_constraints ();
5876 +
5877 + for (opno = 0; opno < recog_data.n_operands; opno++)
5878 + {
5879 + rtx op;
5880 +
5881 + /* Things we need to fix can only occur in inputs. */
5882 + if (recog_data.operand_type[opno] != OP_IN)
5883 + continue;
5884 +
5885 + op = recog_data.operand[opno];
5886 +
5887 + if (avr32_const_pool_ref_operand (op, GET_MODE (op)))
5888 + {
5889 + if (do_pushes)
5890 + {
5891 + rtx cop = avoid_constant_pool_reference (op);
5892 +
5893 + /* Casting the address of something to a mode narrower than a
5894 + word can cause avoid_constant_pool_reference() to return the
5895 + pool reference itself. That's no good to us here. Let's
5896 + just hope that we can use the constant pool value directly.
5897 + */
5898 + if (op == cop)
5899 + cop = get_pool_constant (XEXP (op, 0));
5900 +
5901 + push_minipool_fix (insn, address,
5902 + recog_data.operand_loc[opno],
5903 + recog_data.operand_mode[opno], cop);
5904 + }
5905 +
5906 + result = true;
5907 + }
5908 + else if (TARGET_HAS_ASM_ADDR_PSEUDOS
5909 + && avr32_address_operand (op, GET_MODE (op)))
5910 + {
5911 + /* Handle pseudo instructions using a direct address. These pseudo
5912 + instructions might need entries in the constant pool and we must
5913 + therefore create a constant pool for them, in case the
5914 + assembler/linker needs to insert entries. */
5915 + if (do_pushes)
5916 + {
5917 + /* Push a dummy constant pool entry so that the .cpool
5918 + directive is inserted at the appropriate place in the
5919 + code even if there are no real constant pool entries. This
5920 + is used by the assembler and linker to know where to put
5921 + generated constant pool entries. */
5922 + push_minipool_fix (insn, address,
5923 + recog_data.operand_loc[opno],
5924 + recog_data.operand_mode[opno],
5925 + gen_rtx_UNSPEC (VOIDmode,
5926 + gen_rtvec (1, const0_rtx),
5927 + UNSPEC_FORCE_MINIPOOL));
5928 + result = true;
5929 + }
5930 + }
5931 + }
5932 + return result;
5933 +}
5934 +
5935 +
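+/* Return TRUE if INSN is a simple register-to-register zero- or
+ sign-extension, i.e. a "cast" in the sense used by the reorg
+ optimizations below. */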
5936 +static int
5937 +avr32_insn_is_cast (rtx insn)
5938 +{
5939 +
5940 + if (NONJUMP_INSN_P (insn)
5941 + && GET_CODE (PATTERN (insn)) == SET
5942 + && (GET_CODE (SET_SRC (PATTERN (insn))) == ZERO_EXTEND
5943 + || GET_CODE (SET_SRC (PATTERN (insn))) == SIGN_EXTEND)
5944 + && REG_P (XEXP (SET_SRC (PATTERN (insn)), 0))
5945 + && REG_P (SET_DEST (PATTERN (insn))))
5946 + return true;
5947 + return false;
5948 +}
5949 +
5950 +/* FIXME: The level of nesting in this function is way too deep. It needs to be
5951 + torn apart. */
5952 +static void
5953 +avr32_reorg_optimization (void)
5954 +{
5955 + rtx first = get_insns ();
5956 + rtx insn;
5957 +
5958 + if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
5959 + {
5960 +
5961 + /* Scan through all insns looking for cast operations. */
5962 + if (dump_file)
5963 + {
5964 + fprintf (dump_file, ";; Deleting redundant cast operations:\n");
5965 + }
5966 + for (insn = first; insn; insn = NEXT_INSN (insn))
5967 + {
5968 + rtx reg, src_reg, scan;
5969 + enum machine_mode mode;
5970 + int unused_cast;
5971 + rtx label_ref;
5972 +
5973 + if (avr32_insn_is_cast (insn)
5974 + && (GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == QImode
5975 + || GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == HImode))
5976 + {
5977 + mode = GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0));
5978 + reg = SET_DEST (PATTERN (insn));
5979 + src_reg = XEXP (SET_SRC (PATTERN (insn)), 0);
5980 + }
5981 + else
5982 + {
5983 + continue;
5984 + }
5985 +
5986 + unused_cast = false;
5987 + label_ref = NULL_RTX;
5988 + for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
5989 + {
5990 + /* Check if we have reached the destination of a simple
5991 + conditional jump which we have already scanned past. If so,
5992 + we can safely continue scanning. */
5993 + if (LABEL_P (scan) && label_ref != NULL_RTX)
5994 + {
5995 + if (CODE_LABEL_NUMBER (scan) ==
5996 + CODE_LABEL_NUMBER (XEXP (label_ref, 0)))
5997 + label_ref = NULL_RTX;
5998 + else
5999 + break;
6000 + }
6001 +
6002 + if (!INSN_P (scan))
6003 + continue;
6004 +
6005 + /* For conditional jumps we can manage to keep on scanning if
6006 + we meet the destination label later on before any new jump
6007 + insns occur. */
6008 + if (GET_CODE (scan) == JUMP_INSN)
6009 + {
6010 + if (any_condjump_p (scan) && label_ref == NULL_RTX)
6011 + label_ref = condjump_label (scan);
6012 + else
6013 + break;
6014 + }
6015 +
6016 + if (!reg_mentioned_p (reg, PATTERN (scan)))
6017 + continue;
6018 +
6019 + /* Check if casted register is used in this insn */
6020 + if ((regno_use_in (REGNO (reg), PATTERN (scan)) != NULL_RTX)
6021 + && (GET_MODE (regno_use_in (REGNO (reg), PATTERN (scan))) ==
6022 + GET_MODE (reg)))
6023 + {
6024 + /* If it is not used in the source of the set or in a memory
6025 + expression in the destination, then the register is used
6026 + as a destination and is really dead. */
6027 + if (single_set (scan)
6028 + && GET_CODE (PATTERN (scan)) == SET
6029 + && REG_P (SET_DEST (PATTERN (scan)))
6030 + && !regno_use_in (REGNO (reg), SET_SRC (PATTERN (scan)))
6031 + && label_ref == NULL_RTX)
6032 + {
6033 + unused_cast = true;
6034 + }
6035 + break;
6036 + }
6037 +
6038 + /* Check if register is dead or set in this insn */
6039 + if (dead_or_set_p (scan, reg))
6040 + {
6041 + unused_cast = true;
6042 + break;
6043 + }
6044 + }
6045 +
6046 + /* Check if we have unresolved conditional jumps */
6047 + if (label_ref != NULL_RTX)
6048 + continue;
6049 +
6050 + if (unused_cast)
6051 + {
6052 + if (REGNO (reg) == REGNO (XEXP (SET_SRC (PATTERN (insn)), 0)))
6053 + {
6054 + /* One operand cast, safe to delete */
6055 + if (dump_file)
6056 + {
6057 + fprintf (dump_file,
6058 + ";; INSN %i removed, casted register %i value not used.\n",
6059 + INSN_UID (insn), REGNO (reg));
6060 + }
6061 + SET_INSN_DELETED (insn);
6062 + /* Force the instruction to be recognized again */
6063 + INSN_CODE (insn) = -1;
6064 + }
6065 + else
6066 + {
6067 + /* Two operand cast, which could really be replaced with a
6068 + move if the source register is dead after the cast insn:
6069 + the insn which sets the source register can then set the
6070 + destination register of the cast directly, as long as no
6071 + insn in between uses the register. */
6073 + rtx link = NULL_RTX;
6074 + rtx set;
6075 + rtx src_reg = XEXP (SET_SRC (PATTERN (insn)), 0);
6076 + unused_cast = false;
6077 +
6078 + if (!find_reg_note (insn, REG_DEAD, src_reg))
6079 + continue;
6080 +
6081 + /* Search for the insn which sets the source register */
6082 + for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
6083 + {
6084 + if (REG_NOTE_KIND (link) != 0)
6085 + continue;
6086 + set = single_set (XEXP (link, 0));
6087 + if (set && rtx_equal_p (src_reg, SET_DEST (set)))
6088 + {
6089 + link = XEXP (link, 0);
6090 + break;
6091 + }
6092 + }
6093 +
6094 + /* Found no link, or the link is a call insn where we cannot
6095 + change the destination register */
6096 + if (link == NULL_RTX || CALL_P (link))
6097 + continue;
6098 +
6099 + /* Scan through all insn between link and insn */
6100 + for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
6101 + {
6102 + /* Don't try to trace forward past a CODE_LABEL if we
6103 + haven't seen INSN yet. Ordinarily, we will only
6104 + find the setting insn in LOG_LINKS if it is in the
6105 + same basic block. However, cross-jumping can insert
6106 + code labels in between the load and the call, and
6107 + can result in situations where a single call insn
6108 + may have two targets depending on where we came
6109 + from. */
6110 +
6111 + if (GET_CODE (scan) == CODE_LABEL)
6112 + break;
6113 +
6114 + if (!INSN_P (scan))
6115 + continue;
6116 +
6117 + /* Don't try to trace forward past a JUMP. To optimize
6118 + safely, we would have to check that all the
6119 + instructions at the jump destination did not use REG.
6120 + */
6121 +
6122 + if (GET_CODE (scan) == JUMP_INSN)
6123 + {
6124 + break;
6125 + }
6126 +
6127 + if (!reg_mentioned_p (src_reg, PATTERN (scan)))
6128 + continue;
6129 +
6130 + /* We have reached the cast insn */
6131 + if (scan == insn)
6132 + {
6133 + /* We can remove cast and replace the destination
6134 + register of the link insn with the destination
6135 + of the cast */
6136 + if (dump_file)
6137 + {
6138 + fprintf (dump_file,
6139 + ";; INSN %i removed, casted value unused. "
6140 + "Destination of removed cast operation: register %i, folded into INSN %i.\n",
6141 + INSN_UID (insn), REGNO (reg),
6142 + INSN_UID (link));
6143 + }
6144 + /* Update link insn */
6145 + SET_DEST (PATTERN (link)) =
6146 + gen_rtx_REG (mode, REGNO (reg));
6147 + /* Force the instruction to be recognized again */
6148 + INSN_CODE (link) = -1;
6149 +
6150 + /* Delete insn */
6151 + SET_INSN_DELETED (insn);
6152 + /* Force the instruction to be recognized again */
6153 + INSN_CODE (insn) = -1;
6154 + break;
6155 + }
6156 + }
6157 + }
6158 + }
6159 + }
6160 + }
6161 +
6162 + if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
6163 + {
6164 +
6165 + /* Scan through all insns looking for shifted add operations */
6166 + if (dump_file)
6167 + {
6168 + fprintf (dump_file,
6169 + ";; Deleting redundant shifted add operations:\n");
6170 + }
6171 + for (insn = first; insn; insn = NEXT_INSN (insn))
6172 + {
6173 + rtx reg, mem_expr, scan, op0, op1;
6174 + int add_only_used_as_pointer;
6175 +
6176 + if (INSN_P (insn)
6177 + && GET_CODE (PATTERN (insn)) == SET
6178 + && GET_CODE (SET_SRC (PATTERN (insn))) == PLUS
6179 + && (GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == MULT
6180 + || GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == ASHIFT)
6181 + && GET_CODE (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1)) ==
6182 + CONST_INT && REG_P (SET_DEST (PATTERN (insn)))
6183 + && REG_P (XEXP (SET_SRC (PATTERN (insn)), 1))
6184 + && REG_P (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0)))
6185 + {
6186 + reg = SET_DEST (PATTERN (insn));
6187 + mem_expr = SET_SRC (PATTERN (insn));
6188 + op0 = XEXP (XEXP (mem_expr, 0), 0);
6189 + op1 = XEXP (mem_expr, 1);
6190 + }
6191 + else
6192 + {
6193 + continue;
6194 + }
6195 +
6196 + /* Scan forward to check if the result of the shifted add
6197 + operation is only used as an address in memory operations and
6198 + that the operands to the shifted add are not clobbered. */
6199 + add_only_used_as_pointer = false;
6200 + for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
6201 + {
6202 + if (!INSN_P (scan))
6203 + continue;
6204 +
6205 + /* Don't try to trace forward past a JUMP or CALL. To optimize
6206 + safely, we would have to check that all the instructions at
6207 + the jump destination did not use REG. */
6208 +
6209 + if (GET_CODE (scan) == JUMP_INSN)
6210 + {
6211 + break;
6212 + }
6213 +
6214 + /* If used in a call insn then we cannot optimize it away */
6215 + if (CALL_P (scan) && find_regno_fusage (scan, USE, REGNO (reg)))
6216 + break;
6217 +
6218 + /* If any of the operands of the shifted add are clobbered we
6219 + cannot optimize the shifted add away */
6220 + if ((reg_set_p (op0, scan) && (REGNO (op0) != REGNO (reg)))
6221 + || (reg_set_p (op1, scan) && (REGNO (op1) != REGNO (reg))))
6222 + break;
6223 +
6224 + if (!reg_mentioned_p (reg, PATTERN (scan)))
6225 + continue;
6226 +
6227 + /* If it is used anywhere other than as a pointer or as the
6228 + destination register, we fail */
6229 + if (!(single_set (scan)
6230 + && GET_CODE (PATTERN (scan)) == SET
6231 + && ((MEM_P (SET_DEST (PATTERN (scan)))
6232 + && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
6233 + && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) ==
6234 + REGNO (reg)) || (MEM_P (SET_SRC (PATTERN (scan)))
6235 + &&
6236 + REG_P (XEXP
6237 + (SET_SRC (PATTERN (scan)),
6238 + 0))
6239 + &&
6240 + REGNO (XEXP
6241 + (SET_SRC (PATTERN (scan)),
6242 + 0)) == REGNO (reg))))
6243 + && !(GET_CODE (PATTERN (scan)) == SET
6244 + && REG_P (SET_DEST (PATTERN (scan)))
6245 + && !regno_use_in (REGNO (reg),
6246 + SET_SRC (PATTERN (scan)))))
6247 + break;
6248 +
6249 + /* Check if register is dead or set in this insn */
6250 + if (dead_or_set_p (scan, reg))
6251 + {
6252 + add_only_used_as_pointer = true;
6253 + break;
6254 + }
6255 + }
6256 +
6257 + if (add_only_used_as_pointer)
6258 + {
6259 + /* Let's delete the add insn and replace all memory references
6260 + which use the pointer with the full expression. */
6261 + if (dump_file)
6262 + {
6263 + fprintf (dump_file,
6264 + ";; Deleting INSN %i since address expression can be folded into all "
6265 + "memory references using this expression\n",
6266 + INSN_UID (insn));
6267 + }
6268 + SET_INSN_DELETED (insn);
6269 + /* Force the instruction to be recognized again */
6270 + INSN_CODE (insn) = -1;
6271 +
6272 + for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
6273 + {
6274 + if (!INSN_P (scan))
6275 + continue;
6276 +
6277 + if (!reg_mentioned_p (reg, PATTERN (scan)))
6278 + continue;
6279 +
6280 + /* If the register is used as the pointer of a memory
6281 + reference, replace it with the full address expression */
6282 + if ((single_set (scan)
6283 + && GET_CODE (PATTERN (scan)) == SET
6284 + && ((MEM_P (SET_DEST (PATTERN (scan)))
6285 + && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
6286 + && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) ==
6287 + REGNO (reg)) || (MEM_P (SET_SRC (PATTERN (scan)))
6288 + &&
6289 + REG_P (XEXP
6290 + (SET_SRC (PATTERN (scan)),
6291 + 0))
6292 + &&
6293 + REGNO (XEXP
6294 + (SET_SRC (PATTERN (scan)),
6295 + 0)) == REGNO (reg)))))
6296 + {
6297 + if (dump_file)
6298 + {
6299 + fprintf (dump_file,
6300 + ";; Register %i replaced by indexed address in INSN %i\n",
6301 + REGNO (reg), INSN_UID (scan));
6302 + }
6303 + if (MEM_P (SET_DEST (PATTERN (scan))))
6304 + XEXP (SET_DEST (PATTERN (scan)), 0) = mem_expr;
6305 + else
6306 + XEXP (SET_SRC (PATTERN (scan)), 0) = mem_expr;
6307 + }
6308 +
6309 + /* Check if register is dead or set in this insn */
6310 + if (dead_or_set_p (scan, reg))
6311 + {
6312 + break;
6313 + }
6314 +
6315 + }
6316 + }
6317 + }
6318 + }
6319 +}
6320 +
6321 +/* Exported to toplev.c.
6322 +
6323 + Do a final pass over the function, just before delayed branch
6324 + scheduling. */
6325 +
6326 +static void
6327 +avr32_reorg (void)
6328 +{
6329 + rtx insn;
6330 + HOST_WIDE_INT address = 0;
6331 + Mfix *fix;
6332 +
6333 + minipool_fix_head = minipool_fix_tail = NULL;
6334 +
6335 + /* The first insn must always be a note, or the code below won't scan it
6336 + properly. */
6337 + insn = get_insns ();
6338 + if (GET_CODE (insn) != NOTE)
6339 + abort ();
6340 +
6341 + /* Scan all the insns and record the operands that will need fixing. */
6342 + for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
6343 + {
6344 + if (GET_CODE (insn) == BARRIER)
6345 + push_minipool_barrier (insn, address);
6346 + else if (INSN_P (insn))
6347 + {
6348 + rtx table;
6349 +
6350 + note_invalid_constants (insn, address, true);
6351 + address += get_attr_length (insn);
6352 +
6353 + /* If the insn is a vector jump, add the size of the table and skip
6354 + the table. */
6355 + if ((table = is_jump_table (insn)) != NULL)
6356 + {
6357 + address += get_jump_table_size (table);
6358 + insn = table;
6359 + }
6360 + }
6361 + }
6362 +
6363 + fix = minipool_fix_head;
6364 +
6365 + /* Now scan the fixups and perform the required changes. */
6366 + while (fix)
6367 + {
6368 + Mfix *ftmp;
6369 + Mfix *fdel;
6370 + Mfix *last_added_fix;
6371 + Mfix *last_barrier = NULL;
6372 + Mfix *this_fix;
6373 +
6374 + /* Skip any further barriers before the next fix. */
6375 + while (fix && GET_CODE (fix->insn) == BARRIER)
6376 + fix = fix->next;
6377 +
6378 + /* No more fixes. */
6379 + if (fix == NULL)
6380 + break;
6381 +
6382 + last_added_fix = NULL;
6383 +
6384 + for (ftmp = fix; ftmp; ftmp = ftmp->next)
6385 + {
6386 + if (GET_CODE (ftmp->insn) == BARRIER)
6387 + {
6388 + if (ftmp->address >= minipool_vector_head->max_address)
6389 + break;
6390 +
6391 + last_barrier = ftmp;
6392 + }
6393 + else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
6394 + break;
6395 +
6396 + last_added_fix = ftmp; /* Keep track of the last fix added. */
6398 + }
6399 +
6400 + /* If we found a barrier, drop back to that; any fixes that we could
6401 + have reached but come after the barrier will now go in the next
6402 + mini-pool. */
6403 + if (last_barrier != NULL)
6404 + {
6405 + /* Reduce the refcount for those fixes that won't go into this pool
6406 + after all. */
6407 + for (fdel = last_barrier->next;
6408 + fdel && fdel != ftmp; fdel = fdel->next)
6409 + {
6410 + fdel->minipool->refcount--;
6411 + fdel->minipool = NULL;
6412 + }
6413 +
6414 + ftmp = last_barrier;
6415 + }
6416 + else
6417 + {
6418 + /* ftmp is the first fix that we can't fit into this pool and there are
6419 + no natural barriers that we could use. Insert a new barrier in the
6420 + code somewhere between the previous fix and this one, and
6421 + arrange to jump around it. */
6422 + HOST_WIDE_INT max_address;
6423 +
6424 + /* The last item on the list of fixes must be a barrier, so we can
6425 + never run off the end of the list of fixes without last_barrier
6426 + being set. */
6427 + if (ftmp == NULL)
6428 + abort ();
6429 +
6430 + max_address = minipool_vector_head->max_address;
6431 + /* Check that there isn't another fix that is in range that we
6432 + couldn't fit into this pool because the pool was already too
6433 + large: we need to put the pool before such an instruction. */
6434 + if (ftmp->address < max_address)
6435 + max_address = ftmp->address;
6436 +
6437 + last_barrier = create_fix_barrier (last_added_fix, max_address);
6438 + }
6439 +
6440 + assign_minipool_offsets (last_barrier);
6441 +
6442 + while (ftmp)
6443 + {
6444 + if (GET_CODE (ftmp->insn) != BARRIER
6445 + && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
6446 + == NULL))
6447 + break;
6448 +
6449 + ftmp = ftmp->next;
6450 + }
6451 +
6452 + /* Scan over the fixes we have identified for this pool, fixing them up
6453 + and adding the constants to the pool itself. */
6454 + for (this_fix = fix; this_fix && ftmp != this_fix;
6455 + this_fix = this_fix->next)
6456 + if (GET_CODE (this_fix->insn) != BARRIER
6457 + /* Do nothing for entries present just to force the insertion of
6458 + a minipool. */
6459 + && !IS_FORCE_MINIPOOL (this_fix->value))
6460 + {
6461 + rtx addr = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
6462 + minipool_vector_label),
6463 + this_fix->minipool->offset);
6464 + *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
6465 + }
6466 +
6467 + dump_minipool (last_barrier->insn);
6468 + fix = ftmp;
6469 + }
6470 +
6471 + /* Free the minipool memory. */
6472 + obstack_free (&minipool_obstack, minipool_startobj);
6473 +
6474 + avr32_reorg_optimization ();
6475 +}
6476 +
6477 +
6478 +/*
6479 + Hook for doing some final scanning of instructions. Does nothing yet. */
6480 +void
6481 +avr32_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
6482 + rtx * opvec ATTRIBUTE_UNUSED,
6483 + int noperands ATTRIBUTE_UNUSED)
6484 +{
6485 + return;
6486 +}
6487 +
6488 +
6489 +
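+/* Expand a conditional move of mode MODE using the comparison operands
+ previously stashed in avr32_compare_op0/avr32_compare_op1. Returns
+ TRUE on success, FALSE if the operands could not be put into a valid
+ form (e.g. when no new pseudos may be created). */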
6490 +int
6491 +avr32_expand_movcc (enum machine_mode mode, rtx operands[])
6492 +{
6493 + rtx operator;
6494 + rtx compare_op0 = avr32_compare_op0;
6495 + rtx compare_op1 = avr32_compare_op1;
6496 +
6497 + /* Only allow certain compare operations */
6498 + if (GET_MODE (compare_op0) != DImode
6499 + && GET_MODE (compare_op0) != SImode
6500 + && GET_MODE (compare_op0) != HImode && GET_MODE (compare_op0) != QImode)
6501 + return FALSE;
6502 +
6503 + if (GET_CODE (compare_op0) == MEM)
6504 + {
6505 + if (no_new_pseudos)
6506 + return FALSE;
6507 + else
6508 + compare_op0 = force_reg (GET_MODE (compare_op0), compare_op0);
6509 + }
6510 +
6511 + if (GET_CODE (compare_op1) == MEM)
6512 + {
6513 + if (no_new_pseudos)
6514 + return FALSE;
6515 + else
6516 + compare_op1 = force_reg (GET_MODE (compare_op1), compare_op1);
6517 + }
6518 +
6519 + /* For DI, HI and QI mode force comparison operands to registers */
6520 + if (GET_MODE (compare_op0) == DImode
6521 + || GET_MODE (compare_op0) == HImode || GET_MODE (compare_op0) == QImode)
6522 + {
6523 + if (GET_CODE (compare_op0) != REG)
6524 + {
6525 + if (no_new_pseudos)
6526 + return FALSE;
6527 + else
6528 + compare_op0 = force_reg (GET_MODE (compare_op0), compare_op0);
6529 + }
6530 +
6531 + if (GET_CODE (compare_op1) != REG)
6532 + {
6533 + if (no_new_pseudos)
6534 + return FALSE;
6535 + else
6536 + compare_op1 = force_reg (GET_MODE (compare_op0), compare_op1);
6537 + }
6538 + }
6539 +
6540 + /* Force any SImode immediate compare operands that do not satisfy
6541 + the Ks21 constraint into a register */
6542 + if (GET_MODE (compare_op0) == SImode)
6543 + {
6544 + if ((GET_CODE (compare_op0) == CONST_INT
6545 + && !avr32_const_ok_for_constraint_p (INTVAL (compare_op0), 'K',
6546 + "Ks21")))
6547 + {
6548 + if (no_new_pseudos)
6549 + return FALSE;
6550 + else
6551 + compare_op0 = force_reg (SImode, compare_op0);
6552 + }
6553 +
6554 + if ((GET_CODE (compare_op1) == CONST_INT
6555 + && !avr32_const_ok_for_constraint_p (INTVAL (compare_op1), 'K',
6556 + "Ks21")))
6557 + {
6558 + if (no_new_pseudos)
6559 + return FALSE;
6560 + else
6561 + compare_op1 = force_reg (SImode, compare_op1);
6562 + }
6563 + }
6564 +
6565 + /* If we have immediates that cannot be encoded in conditional mov
6566 + instructions, force them into registers */
6567 + if (GET_CODE (operands[2]) == CONST_INT
6568 + && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08"))
6569 + {
6570 + if (no_new_pseudos)
6571 + return FALSE;
6572 + else
6573 + operands[2] = force_reg (mode, operands[2]);
6574 + }
6575 +
6576 + if (GET_CODE (operands[3]) == CONST_INT
6577 + && !avr32_const_ok_for_constraint_p (INTVAL (operands[3]), 'K', "Ks08"))
6578 + {
6579 + if (no_new_pseudos)
6580 + return FALSE;
6581 + else
6582 + operands[3] = force_reg (mode, operands[3]);
6583 + }
6584 +
6585 + /* Emit the actual instruction */
6586 + operator = gen_rtx_EQ (VOIDmode, const0_rtx, const0_rtx);
6587 + PUT_CODE (operator, GET_CODE (operands[1]));
6588 + switch (mode)
6589 + {
6590 + case SImode:
6591 + switch (GET_MODE (compare_op0))
6592 + {
6593 + case SImode:
6594 + emit_insn (gen_movsicc_cmpsi
6595 + (operands[0], operator, operands[2], operands[3],
6596 + compare_op0, compare_op1));
6597 + break;
6598 + case DImode:
6599 + emit_insn (gen_movsicc_cmpdi
6600 + (operands[0], operator, operands[2], operands[3],
6601 + compare_op0, compare_op1));
6602 + break;
6603 + case HImode:
6604 + emit_insn (gen_movsicc_cmphi
6605 + (operands[0], operator, operands[2], operands[3],
6606 + compare_op0, compare_op1));
6607 + break;
6608 + case QImode:
6609 + emit_insn (gen_movsicc_cmpqi
6610 + (operands[0], operator, operands[2], operands[3],
6611 + compare_op0, compare_op1));
6612 + break;
6613 + default:
6614 + return FALSE;
6615 + }
6616 + break;
6617 + case HImode:
6618 + switch (GET_MODE (compare_op0))
6619 + {
6620 + case SImode:
6621 + emit_insn (gen_movhicc_cmpsi
6622 + (operands[0], operator, operands[2], operands[3],
6623 + compare_op0, compare_op1));
6624 + break;
6625 + case DImode:
6626 + emit_insn (gen_movhicc_cmpdi
6627 + (operands[0], operator, operands[2], operands[3],
6628 + compare_op0, compare_op1));
6629 + break;
6630 + case HImode:
6631 + emit_insn (gen_movhicc_cmphi
6632 + (operands[0], operator, operands[2], operands[3],
6633 + compare_op0, compare_op1));
6634 + break;
6635 + case QImode:
6636 + emit_insn (gen_movhicc_cmpqi
6637 + (operands[0], operator, operands[2], operands[3],
6638 + compare_op0, compare_op1));
6639 + break;
6640 + default:
6641 + return FALSE;
6642 + }
6643 + break;
6644 + case QImode:
6645 + switch (GET_MODE (compare_op0))
6646 + {
6647 + case SImode:
6648 + emit_insn (gen_movqicc_cmpsi
6649 + (operands[0], operator, operands[2], operands[3],
6650 + compare_op0, compare_op1));
6651 + break;
6652 + case DImode:
6653 + emit_insn (gen_movqicc_cmpdi
6654 + (operands[0], operator, operands[2], operands[3],
6655 + compare_op0, compare_op1));
6656 + break;
6657 + case HImode:
6658 + emit_insn (gen_movqicc_cmphi
6659 + (operands[0], operator, operands[2], operands[3],
6660 + compare_op0, compare_op1));
6661 + break;
6662 + case QImode:
6663 + emit_insn (gen_movqicc_cmpqi
6664 + (operands[0], operator, operands[2], operands[3],
6665 + compare_op0, compare_op1));
6666 + break;
6667 + default:
6668 + return FALSE;
6669 + }
6670 + break;
6671 + default:
6672 + return FALSE;
6673 + }
6674 +
6675 + return TRUE;
6676 +}
6677 +
6678 +
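+/* Expand a conditional add: like avr32_expand_movcc, but operands[3]
+ must be a constant whose negation fits the Ks08 constraint. Returns
+ TRUE on success, FALSE otherwise. */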
6679 +int
6680 +avr32_expand_addcc (enum machine_mode mode, rtx operands[])
6681 +{
6682 + rtx operator;
6683 + rtx compare_op0 = avr32_compare_op0;
6684 + rtx compare_op1 = avr32_compare_op1;
6685 +
6686 + /* Check if we have an add/sub with a Ks08 immediate */
6687 + if (!(GET_CODE (operands[3]) == CONST_INT
6688 + && avr32_const_ok_for_constraint_p (-INTVAL (operands[3]), 'K',
6689 + "Ks08")))
6690 + return FALSE;
6691 + else
6692 + /* Flip sign */
6693 + operands[3] = GEN_INT (-INTVAL (operands[3]));
6694 +
6695 + /* Only allow certain compare operations */
6696 + if (GET_MODE (compare_op0) != DImode
6697 + && GET_MODE (compare_op0) != SImode
6698 + && GET_MODE (compare_op0) != HImode && GET_MODE (compare_op0) != QImode)
6699 + return FALSE;
6700 +
6701 + if (GET_CODE (compare_op0) == MEM)
6702 + {
6703 + if (no_new_pseudos)
6704 + return FALSE;
6705 + else
6706 + compare_op0 = force_reg (GET_MODE (compare_op0), compare_op0);
6707 + }
6708 +
6709 + if (GET_CODE (compare_op1) == MEM)
6710 + {
6711 + if (no_new_pseudos)
6712 + return FALSE;
6713 + else
6714 + compare_op1 = force_reg (GET_MODE (compare_op1), compare_op1);
6715 + }
6716 +
6717 + /* For DI, HI and QI mode force comparison operands to registers */
6718 + if (GET_MODE (compare_op0) == DImode
6719 + || GET_MODE (compare_op0) == HImode || GET_MODE (compare_op0) == QImode)
6720 + {
6721 + if (GET_CODE (compare_op0) != REG)
6722 + {
6723 + if (no_new_pseudos)
6724 + return FALSE;
6725 + else
6726 + compare_op0 = force_reg (GET_MODE (compare_op0), compare_op0);
6727 + }
6728 +
6729 + if (GET_CODE (compare_op1) != REG)
6730 + {
6731 + if (no_new_pseudos)
6732 + return FALSE;
6733 + else
6734 + compare_op1 = force_reg (GET_MODE (compare_op0), compare_op1);
6735 + }
6736 + }
6737 +
6738 + /* Force any SImode immediate compare operands that do not satisfy
6739 + the Ks21 constraint into a register */
6740 + if (GET_MODE (compare_op0) == SImode)
6741 + {
6742 + if ((GET_CODE (compare_op0) == CONST_INT
6743 + && !avr32_const_ok_for_constraint_p (INTVAL (compare_op0), 'K',
6744 + "Ks21")))
6745 + {
6746 + if (no_new_pseudos)
6747 + return FALSE;
6748 + else
6749 + compare_op0 = force_reg (SImode, compare_op0);
6750 + }
6751 +
6752 + if ((GET_CODE (compare_op1) == CONST_INT
6753 + && !avr32_const_ok_for_constraint_p (INTVAL (compare_op1), 'K',
6754 + "Ks21")))
6755 + {
6756 + if (no_new_pseudos)
6757 + return FALSE;
6758 + else
6759 + compare_op1 = force_reg (SImode, compare_op1);
6760 + }
6761 + }
6762 +
6763 + /* If we have immediates that cannot be encoded in conditional mov
6764 + instructions, force them into registers */
6765 + if (GET_CODE (operands[2]) == CONST_INT
6766 + && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08"))
6767 + {
6768 + if (no_new_pseudos)
6769 + return FALSE;
6770 + else
6771 + operands[2] = force_reg (mode, operands[2]);
6772 + }
6773 +
6774 + if (GET_CODE (operands[3]) == CONST_INT
6775 + && !avr32_const_ok_for_constraint_p (INTVAL (operands[3]), 'K', "Ks08"))
6776 + {
6777 + if (no_new_pseudos)
6778 + return FALSE;
6779 + else
6780 + operands[3] = force_reg (mode, operands[3]);
6781 + }
6782 +
6783 + if (GET_CODE (operands[0]) != REG)
6784 + {
6785 + if (no_new_pseudos)
6786 + return FALSE;
6787 + else
6788 + operands[0] = force_reg (GET_MODE (operands[0]), operands[0]);
6789 + }
6790 +
6791 + if (GET_CODE (operands[2]) != REG)
6792 + {
6793 + if (no_new_pseudos)
6794 + return FALSE;
6795 + else
6796 + operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
6797 + }
6798 +
6799 + /* Check if operands[0] and operands[2] are different */
6800 + if (REGNO (operands[0]) != REGNO (operands[2]))
6801 + {
6802 + emit_move_insn (operands[0], operands[2]);
6803 + operands[2] = operands[0];
6804 + }
6805 +
6806 + /* Emit the actual instruction */
6807 + operator = gen_rtx_EQ (VOIDmode, const0_rtx, const0_rtx);
6808 + PUT_CODE (operator, GET_CODE (operands[1]));
6809 + switch (mode)
6810 + {
6811 + case SImode:
6812 + switch (GET_MODE (compare_op0))
6813 + {
6814 + case SImode:
6815 + emit_insn (gen_addsicc_cmpsi
6816 + (operands[0], operator, operands[2], operands[3],
6817 + compare_op0, compare_op1));
6818 + break;
6819 + case DImode:
6820 + emit_insn (gen_addsicc_cmpdi
6821 + (operands[0], operator, operands[2], operands[3],
6822 + compare_op0, compare_op1));
6823 + break;
6824 + case HImode:
6825 + emit_insn (gen_addsicc_cmphi
6826 + (operands[0], operator, operands[2], operands[3],
6827 + compare_op0, compare_op1));
6828 + break;
6829 + case QImode:
6830 + emit_insn (gen_addsicc_cmpqi
6831 + (operands[0], operator, operands[2], operands[3],
6832 + compare_op0, compare_op1));
6833 + break;
6834 + default:
6835 + return FALSE;
6836 + }
6837 + break;
6838 + case HImode:
6839 + switch (GET_MODE (compare_op0))
6840 + {
6841 + case SImode:
6842 + emit_insn (gen_addhicc_cmpsi
6843 + (operands[0], operator, operands[2], operands[3],
6844 + compare_op0, compare_op1));
6845 + break;
6846 + case DImode:
6847 + emit_insn (gen_addhicc_cmpdi
6848 + (operands[0], operator, operands[2], operands[3],
6849 + compare_op0, compare_op1));
6850 + break;
6851 + case HImode:
6852 + emit_insn (gen_addhicc_cmphi
6853 + (operands[0], operator, operands[2], operands[3],
6854 + compare_op0, compare_op1));
6855 + break;
6856 + case QImode:
6857 + emit_insn (gen_addhicc_cmpqi
6858 + (operands[0], operator, operands[2], operands[3],
6859 + compare_op0, compare_op1));
6860 + break;
6861 + default:
6862 + return FALSE;
6863 + }
6864 + break;
6865 + case QImode:
6866 + switch (GET_MODE (compare_op0))
6867 + {
6868 + case SImode:
6869 + emit_insn (gen_addqicc_cmpsi
6870 + (operands[0], operator, operands[2], operands[3],
6871 + compare_op0, compare_op1));
6872 + break;
6873 + case DImode:
6874 + emit_insn (gen_addqicc_cmpdi
6875 + (operands[0], operator, operands[2], operands[3],
6876 + compare_op0, compare_op1));
6877 + break;
6878 + case HImode:
6879 + emit_insn (gen_addqicc_cmphi
6880 + (operands[0], operator, operands[2], operands[3],
6881 + compare_op0, compare_op1));
6882 + break;
6883 + case QImode:
6884 + emit_insn (gen_addqicc_cmpqi
6885 + (operands[0], operator, operands[2], operands[3],
6886 + compare_op0, compare_op1));
6887 + break;
6888 + default:
6889 + return FALSE;
6890 + }
6891 + break;
6892 + default:
6893 + return FALSE;
6894 + }
6895 +
6896 + return TRUE;
6897 +}
6898 +
6899 +/* Function for changing the condition on the next instruction,
6900 + should be used when emitting compare instructions and
6901 + the condition of the next instruction needs to change.
6902 +*/
6903 +int
6904 +set_next_insn_cond (rtx cur_insn, rtx new_cond)
6905 +{
6906 + rtx next_insn = next_nonnote_insn (cur_insn);
6907 + if ((next_insn != NULL_RTX)
6908 + && (INSN_P (next_insn))
6909 + && (GET_CODE (PATTERN (next_insn)) == SET)
6910 + && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE))
6911 + {
6912 + /* Branch instructions */
6913 + XEXP (SET_SRC (PATTERN (next_insn)), 0) = new_cond;
6914 + /* Force the instruction to be recognized again */
6915 + INSN_CODE (next_insn) = -1;
6916 + return TRUE;
6917 + }
6918 + else if ((next_insn != NULL_RTX)
6919 + && (INSN_P (next_insn))
6920 + && (GET_CODE (PATTERN (next_insn)) == SET)
6921 + && comparison_operator (SET_SRC (PATTERN (next_insn)),
6922 + GET_MODE (SET_SRC (PATTERN (next_insn)))))
6923 + {
6924 + /* scc with no compare */
6925 + SET_SRC (PATTERN (next_insn)) = new_cond;
6926 + /* Force the instruction to be recognized again */
6927 + INSN_CODE (next_insn) = -1;
6928 + return TRUE;
6929 + }
6930 +
6931 + return FALSE;
6932 +}
6933 +
6934 +/* Function for obtaining the condition for the next instruction
6935 + after cur_insn.
6936 +*/
6937 +rtx
6938 +get_next_insn_cond (rtx cur_insn)
6939 +{
6940 + rtx next_insn = next_nonnote_insn (cur_insn);
6941 + rtx cond = NULL_RTX;
6942 + if ((next_insn != NULL_RTX)
6943 + && (INSN_P (next_insn))
6944 + && (GET_CODE (PATTERN (next_insn)) == SET)
6945 + && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE))
6946 + {
6947 + /* Branch instructions */
6948 + cond = XEXP (SET_SRC (PATTERN (next_insn)), 0);
6949 + }
6950 + else if ((next_insn != NULL_RTX)
6951 + && (INSN_P (next_insn))
6952 + && (GET_CODE (PATTERN (next_insn)) == SET)
6953 + && comparison_operator (SET_SRC (PATTERN (next_insn)),
6954 + GET_MODE (SET_SRC (PATTERN (next_insn)))))
6955 + {
6956 + /* scc with no compare */
6957 + cond = SET_SRC (PATTERN (next_insn));
6958 + }
6959 +
6960 + return cond;
6961 +}
6962 +
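+/* Expand a store-condition-code operation: set operands[0] to the result
+ of comparing avr32_compare_op0 with avr32_compare_op1 under COND. The
+ preceding compare insn is removed, since the comparison is folded into
+ this instruction. */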
6963 +int
6964 +avr32_expand_scc (enum rtx_code cond, rtx * operands)
6965 +{
6966 +
6967 + rtx comparation;
6968 + /* Only allow certain compare operations */
6969 + if (GET_MODE (avr32_compare_op0) != DImode
6970 + && GET_MODE (avr32_compare_op0) != SImode
6971 + && GET_MODE (avr32_compare_op0) != HImode
6972 + && GET_MODE (avr32_compare_op0) != QImode)
6973 + return FALSE;
6974 +
6975 + /* Delete compare instruction as it is merged into this instruction */
6976 + remove_insn (get_last_insn_anywhere ());
6977 +
6978 + if (!REG_P (avr32_compare_op0))
6979 + avr32_compare_op0 =
6980 + force_reg (GET_MODE (avr32_compare_op0), avr32_compare_op0);
6981 +
6982 + if (GET_MODE (avr32_compare_op0) != SImode && !REG_P (avr32_compare_op1))
6983 + {
6984 + avr32_compare_op1 =
6985 + force_reg (GET_MODE (avr32_compare_op0), avr32_compare_op1);
6986 + }
6987 + else if (GET_MODE (avr32_compare_op0) == SImode
6988 + && !REG_P (avr32_compare_op1)
6989 + && (GET_CODE (avr32_compare_op1) != CONST_INT
6990 + || (GET_CODE (avr32_compare_op1) == CONST_INT
6991 + &&
6992 + !avr32_const_ok_for_constraint_p (INTVAL
6993 + (avr32_compare_op1), 'K',
6994 + "Ks21"))))
6995 + avr32_compare_op1 =
6996 + force_reg (GET_MODE (avr32_compare_op0), avr32_compare_op1);
6997 +
6998 +
6999 + comparation =
7000 + gen_rtx_EQ (SImode,
7001 + gen_rtx_COMPARE (GET_MODE (avr32_compare_op0),
7002 + avr32_compare_op0, avr32_compare_op1),
7003 + const0_rtx);
7004 + /* Set correct condition */
7005 + PUT_CODE (comparation, cond);
7006 + emit_insn (gen_rtx_SET (VOIDmode, operands[0], comparation));
7007 + return TRUE;
7008 +}
7009 +
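+/* Output the assembly for a compare of OP0 and OP1 in MODE, unless
+ is_compare_redundant() determines that the flags are already valid.
+ Returns the condition to use for the following instruction. */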
7010 +rtx
7011 +avr32_output_cmp (rtx cond, enum machine_mode mode, rtx op0, rtx op1)
7012 +{
7013 +
7014 + rtx new_cond = NULL_RTX;
7015 + rtx ops[2];
7016 + rtx compare_pattern;
7017 + ops[0] = op0;
7018 + ops[1] = op1;
7019 +
7020 + compare_pattern = gen_rtx_COMPARE (mode, op0, op1);
7021 +
7022 + new_cond = is_compare_redundant (compare_pattern, cond);
7023 +
7024 + if (new_cond != NULL_RTX)
7025 + return new_cond;
7026 +
7027 + /* Insert compare */
7028 + switch (mode)
7029 + {
7030 + case QImode:
7031 + output_asm_insn ("cp.b\t%0, %1", ops);
7032 + break;
7033 + case HImode:
7034 + output_asm_insn ("cp.h\t%0, %1", ops);
7035 + break;
7036 + case SImode:
7037 + output_asm_insn ("cp.w\t%0, %1", ops);
7038 + break;
7039 + case DImode:
7040 + if (rtx_equal_p (op1, const0_rtx))
7041 + output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0", ops);
7042 + else
7043 + output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0, %m1", ops);
7044 + break;
7045 + default:
7046 + internal_error ("Unknown comparison mode");
7047 + break;
7048 + }
7049 +
7050 + return cond;
7051 +}
7052 +
7053 +int
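+/* Predicate: return 1 if OP is a PARALLEL matching the load-multiple
+ pattern, optionally with a write-back update of the base register. */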
7054 +avr32_load_multiple_operation (rtx op,
7055 + enum machine_mode mode ATTRIBUTE_UNUSED)
7056 +{
7057 + int count = XVECLEN (op, 0);
7058 + unsigned int dest_regno;
7059 + rtx src_addr;
7060 + rtx elt;
7061 + int i = 1, base = 0;
7062 +
7063 + if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
7064 + return 0;
7065 +
7066 + /* Check to see if this might be a write-back. */
7067 + if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
7068 + {
7069 + i++;
7070 + base = 1;
7071 +
7072 + /* Now check it more carefully. */
7073 + if (GET_CODE (SET_DEST (elt)) != REG
7074 + || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
7075 + || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
7076 + || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
7077 + return 0;
7078 + }
7079 +
7080 + /* Perform a quick check so we don't blow up below. */
7081 + if (count <= 1
7082 + || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
7083 + || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
7084 + || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC)
7085 + return 0;
7086 +
7087 + dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
7088 + src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
7089 +
7090 + for (; i < count; i++)
7091 + {
7092 + elt = XVECEXP (op, 0, i);
7093 +
7094 + if (GET_CODE (elt) != SET
7095 + || GET_CODE (SET_DEST (elt)) != REG
7096 + || GET_MODE (SET_DEST (elt)) != SImode
7097 + || GET_CODE (SET_SRC (elt)) != UNSPEC)
7098 + return 0;
7099 + }
7100 +
7101 + return 1;
7102 +}
7103 +
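+/* Predicate: return 1 if OP is a PARALLEL matching the store-multiple
+ pattern. */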
7104 +int
7105 +avr32_store_multiple_operation (rtx op,
7106 + enum machine_mode mode ATTRIBUTE_UNUSED)
7107 +{
7108 + int count = XVECLEN (op, 0);
7109 + int src_regno;
7110 + rtx dest_addr;
7111 + rtx elt;
7112 + int i = 1;
7113 +
7114 + if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
7115 + return 0;
7116 +
7117 + /* Perform a quick check so we don't blow up below. */
7118 + if (count <= i
7119 + || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
7120 + || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
7121 + || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC)
7122 + return 0;
7123 +
7124 + src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
7125 + dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
7126 +
7127 + for (; i < count; i++)
7128 + {
7129 + elt = XVECEXP (op, 0, i);
7130 +
7131 + if (GET_CODE (elt) != SET
7132 + || GET_CODE (SET_DEST (elt)) != MEM
7133 + || GET_MODE (SET_DEST (elt)) != SImode
7134 + || GET_CODE (SET_SRC (elt)) != UNSPEC)
7135 + return 0;
7136 + }
7137 +
7138 + return 1;
7139 +}
7140 +
7141 +int
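+/* The following functions are bypass guards, presumably referenced from
+ define_bypass in the pipeline description: each returns TRUE when the
+ corresponding forwarding path between INSN_OUT and INSN_IN applies. */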
7142 +avr32_valid_macmac_bypass (rtx insn_out, rtx insn_in)
7143 +{
7144 + /* Check if they use the same accumulator */
7145 + if (rtx_equal_p
7146 + (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
7147 + {
7148 + return TRUE;
7149 + }
7150 +
7151 + return FALSE;
7152 +}
7153 +
7154 +int
7155 +avr32_valid_mulmac_bypass (rtx insn_out, rtx insn_in)
7156 +{
7157 + /*
7158 + Check if the mul instruction produces the accumulator for the mac
7159 + instruction. */
7160 + if (rtx_equal_p
7161 + (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
7162 + {
7163 + return TRUE;
7164 + }
7165 + return FALSE;
7166 +}
7167 +
7168 +int
7169 +avr32_store_bypass (rtx insn_out, rtx insn_in)
7170 +{
7171 + /* Only a valid bypass if the output result is used as a source in the store
7172 + instruction, NOT if used as a pointer or base. */
7173 + if (rtx_equal_p
7174 + (SET_DEST (PATTERN (insn_out)), SET_SRC (PATTERN (insn_in))))
7175 + {
7176 + return TRUE;
7177 + }
7178 +
7179 + return FALSE;
7180 +}
7181 +
7182 +int
7183 +avr32_mul_waw_bypass (rtx insn_out, rtx insn_in)
7184 +{
7185 + /* Check if the register holding the result from the mul instruction is
7186 + used as a result register in the input instruction. */
7187 + if (rtx_equal_p
7188 + (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
7189 + {
7190 + return TRUE;
7191 + }
7192 +
7193 + return FALSE;
7194 +}
7195 +
7196 +int
7197 +avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in)
7198 +{
7199 + /* Check if the first loaded word in insn_out is used in insn_in. */
7200 + rtx dst_reg;
7201 + rtx second_loaded_reg;
7202 +
7203 + /* If this is a double alu operation then the bypass is not valid */
7204 + if ((get_attr_type (insn_in) == TYPE_ALU
7205 + || get_attr_type (insn_in) == TYPE_ALU2)
7206 + && (GET_MODE_SIZE (GET_MODE (SET_DEST (PATTERN (insn_out)))) > 4))
7207 + return FALSE;
7208 +
7209 + /* Get the destination register in the load */
7210 + if (!REG_P (SET_DEST (PATTERN (insn_out))))
7211 + return FALSE;
7212 +
7213 + dst_reg = SET_DEST (PATTERN (insn_out));
7214 + second_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 1);
7215 +
7216 + if (!reg_mentioned_p (second_loaded_reg, PATTERN (insn_in)))
7217 + return TRUE;
7218 +
7219 + return FALSE;
7220 +}
7221 +
7222 +
7223 +int
7224 +avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in)
7225 +{
7226 + /*
7227 + Check if the first two loaded words in insn_out are used in insn_in. */
7228 + rtx dst_reg;
7229 + rtx third_loaded_reg, fourth_loaded_reg;
7230 +
7231 + /* Get the destination register in the load */
7232 + if (!REG_P (SET_DEST (PATTERN (insn_out))))
7233 + return FALSE;
7234 +
7235 + dst_reg = SET_DEST (PATTERN (insn_out));
7236 + third_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 2);
7237 + fourth_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 3);
7238 +
7239 + if (!reg_mentioned_p (third_loaded_reg, PATTERN (insn_in))
7240 + && !reg_mentioned_p (fourth_loaded_reg, PATTERN (insn_in)))
7241 + {
7242 + return TRUE;
7243 + }
7244 +
7245 + return FALSE;
7246 +}
7247 +
7248 +int
7249 +avr32_sched_use_dfa_pipeline_interface (void)
7250 +{
7251 + /* No need to schedule on the avr32_uc architecture. */
7252 + return (avr32_arch->arch_type != ARCH_TYPE_AVR32_UC);
7253 +}
7254 +
7255 +void
7256 +avr32_select_rtx_section (enum machine_mode mode ATTRIBUTE_UNUSED,
7257 + rtx x ATTRIBUTE_UNUSED,
7258 + unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
7259 +{
7260 + /* Let ASM_OUTPUT_POOL_PROLOGUE take care of this */
7261 +}
7262 +
7263 +/* Set up library functions to comply with the AVR32 ABI */
7264 +
7265 +static void
7266 +avr32_init_libfuncs (void)
7267 +{
7268 + /* Convert gcc run-time function names to AVR32 ABI names */
7269 +
7270 + /* Double-precision floating-point arithmetic. */
7271 + set_optab_libfunc (add_optab, DFmode, "__avr32_f64_add");
7272 + set_optab_libfunc (sdiv_optab, DFmode, "__avr32_f64_div");
7273 + set_optab_libfunc (smul_optab, DFmode, "__avr32_f64_mul");
7274 + set_optab_libfunc (neg_optab, DFmode, NULL);
7275 + set_optab_libfunc (sub_optab, DFmode, "__avr32_f64_sub");
7276 +
7277 + /* Double-precision comparisons. */
7278 + set_optab_libfunc (eq_optab, DFmode, "__avr32_f64_cmp_eq");
7279 + set_optab_libfunc (ne_optab, DFmode, NULL);
7280 + set_optab_libfunc (lt_optab, DFmode, "__avr32_f64_cmp_lt");
7281 + set_optab_libfunc (le_optab, DFmode, NULL);
7282 + set_optab_libfunc (ge_optab, DFmode, "__avr32_f64_cmp_ge");
7283 + set_optab_libfunc (gt_optab, DFmode, NULL);
7284 +
7285 + /* Single-precision floating-point arithmetic. */
7286 + set_optab_libfunc (add_optab, SFmode, "__avr32_f32_add");
7287 + set_optab_libfunc (sdiv_optab, SFmode, "__avr32_f32_div");
7288 + set_optab_libfunc (smul_optab, SFmode, "__avr32_f32_mul");
7289 + set_optab_libfunc (neg_optab, SFmode, NULL);
7290 + set_optab_libfunc (sub_optab, SFmode, "__avr32_f32_sub");
7291 +
7292 + /* Single-precision comparisons. */
7293 + set_optab_libfunc (eq_optab, SFmode, "__avr32_f32_cmp_eq");
7294 + set_optab_libfunc (ne_optab, SFmode, NULL);
7295 + set_optab_libfunc (lt_optab, SFmode, "__avr32_f32_cmp_lt");
7296 + set_optab_libfunc (le_optab, SFmode, NULL);
7297 + set_optab_libfunc (ge_optab, SFmode, "__avr32_f32_cmp_ge");
7298 + set_optab_libfunc (gt_optab, SFmode, NULL);
7299 +
7300 + /* Floating-point to integer conversions. */
7301 + set_conv_libfunc (sfix_optab, SImode, DFmode, "__avr32_f64_to_s32");
7302 + set_conv_libfunc (ufix_optab, SImode, DFmode, "__avr32_f64_to_u32");
7303 + set_conv_libfunc (sfix_optab, DImode, DFmode, "__avr32_f64_to_s64");
7304 + set_conv_libfunc (ufix_optab, DImode, DFmode, "__avr32_f64_to_u64");
7305 + set_conv_libfunc (sfix_optab, SImode, SFmode, "__avr32_f32_to_s32");
7306 + set_conv_libfunc (ufix_optab, SImode, SFmode, "__avr32_f32_to_u32");
7307 + set_conv_libfunc (sfix_optab, DImode, SFmode, "__avr32_f32_to_s64");
7308 + set_conv_libfunc (ufix_optab, DImode, SFmode, "__avr32_f32_to_u64");
7309 +
7310 + /* Conversions between floating types. */
7311 + set_conv_libfunc (trunc_optab, SFmode, DFmode, "__avr32_f64_to_f32");
7312 + set_conv_libfunc (sext_optab, DFmode, SFmode, "__avr32_f32_to_f64");
7313 +
7314 + /* Integer to floating-point conversions. Table 8. */
7315 + set_conv_libfunc (sfloat_optab, DFmode, SImode, "__avr32_s32_to_f64");
7316 + set_conv_libfunc (sfloat_optab, DFmode, DImode, "__avr32_s64_to_f64");
7317 + set_conv_libfunc (sfloat_optab, SFmode, SImode, "__avr32_s32_to_f32");
7318 + set_conv_libfunc (sfloat_optab, SFmode, DImode, "__avr32_s64_to_f32");
7319 + set_conv_libfunc (ufloat_optab, DFmode, SImode, "__avr32_u32_to_f64");
7320 + set_conv_libfunc (ufloat_optab, SFmode, SImode, "__avr32_u32_to_f32");
7321 + /* TODO: Add these to gcc library functions */
7322 +
7323 + set_conv_libfunc (ufloat_optab, DFmode, DImode, NULL);
7324 + set_conv_libfunc (ufloat_optab, SFmode, DImode, NULL);
7325 +
7326 + /* Long long. Table 9. */
7327 + set_optab_libfunc (smul_optab, DImode, "__avr32_mul64");
7328 + set_optab_libfunc (sdiv_optab, DImode, "__avr32_sdiv64");
7329 + set_optab_libfunc (udiv_optab, DImode, "__avr32_udiv64");
7330 + set_optab_libfunc (smod_optab, DImode, "__avr32_smod64");
7331 + set_optab_libfunc (umod_optab, DImode, "__avr32_umod64");
7332 + set_optab_libfunc (ashl_optab, DImode, "__avr32_lsl64");
7333 + set_optab_libfunc (lshr_optab, DImode, "__avr32_lsr64");
7334 + set_optab_libfunc (ashr_optab, DImode, "__avr32_asr64");
7335 +}
7336 diff -Nur gcc-4.1.2/gcc/config/avr32/avr32-elf.h gcc-4.1.2-owrt/gcc/config/avr32/avr32-elf.h
7337 --- gcc-4.1.2/gcc/config/avr32/avr32-elf.h 1970-01-01 01:00:00.000000000 +0100
7338 +++ gcc-4.1.2-owrt/gcc/config/avr32/avr32-elf.h 2007-05-24 12:03:28.000000000 +0200
7339 @@ -0,0 +1,82 @@
7340 +/*
7341 + Elf specific definitions.
7342 + Copyright 2003-2006 Atmel Corporation.
7343 +
7344 + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
7345 +
7346 + This file is part of GCC.
7347 +
7348 + This program is free software; you can redistribute it and/or modify
7349 + it under the terms of the GNU General Public License as published by
7350 + the Free Software Foundation; either version 2 of the License, or
7351 + (at your option) any later version.
7352 +
7353 + This program is distributed in the hope that it will be useful,
7354 + but WITHOUT ANY WARRANTY; without even the implied warranty of
7355 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
7356 + GNU General Public License for more details.
7357 +
7358 + You should have received a copy of the GNU General Public License
7359 + along with this program; if not, write to the Free Software
7360 + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
7361 +
7362 +
7363 +/*****************************************************************************
7364 + * Controlling the Compiler Driver, 'gcc'
7365 + *****************************************************************************/
7366 +
7367 +/* Run-time Target Specification. */
7368 +#undef TARGET_VERSION
7369 +#define TARGET_VERSION fputs (" (AVR32 GNU with ELF)", stderr);
7370 +
7371 +/*
7372 +Another C string constant used much like LINK_SPEC. The
7373 +difference between the two is that STARTFILE_SPEC is used at
7374 +the very beginning of the command given to the linker.
7375 +
7376 +If this macro is not defined, a default is provided that loads the
7377 +standard C startup file from the usual place. See gcc.c.
7378 +*/
7379 +#undef STARTFILE_SPEC
7380 +#define STARTFILE_SPEC "crt0%O%s crti%O%s crtbegin%O%s"
7381 +
7382 +#undef LINK_SPEC
7383 +#define LINK_SPEC "%{muse-oscall:--defsym __do_not_use_oscall_coproc__=0} %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}} %{mpart=*:-mavr32elf_%*} %{mcpu=*:-mavr32elf_%*}"
7384 +
7385 +
7386 +/*
7387 +Another C string constant used much like LINK_SPEC. The
7388 +difference between the two is that ENDFILE_SPEC is used at
7389 +the very end of the command given to the linker.
7390 +
7391 +Do not define this macro if it does not need to do anything.
7392 +*/
7393 +#undef ENDFILE_SPEC
7394 +#define ENDFILE_SPEC "crtend%O%s crtn%O%s"
7395 +
7396 +
7397 +/* Target CPU builtins. */
7398 +#define TARGET_CPU_CPP_BUILTINS() \
7399 + do \
7400 + { \
7401 + builtin_define ("__avr32__"); \
7402 + builtin_define ("__AVR32__"); \
7403 + builtin_define ("__AVR32_ELF__"); \
7404 + builtin_define (avr32_part->macro); \
7405 + builtin_define (avr32_arch->macro); \
7406 + if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A) \
7407 + builtin_define ("__AVR32_AVR32A__"); \
7408 + else \
7409 + builtin_define ("__AVR32_AVR32B__"); \
7410 + if (TARGET_UNALIGNED_WORD) \
7411 + builtin_define ("__AVR32_HAS_UNALIGNED_WORD__"); \
7412 + if (TARGET_SIMD) \
7413 + builtin_define ("__AVR32_HAS_SIMD__"); \
7414 + if (TARGET_DSP) \
7415 + builtin_define ("__AVR32_HAS_DSP__"); \
7416 + if (TARGET_RMW) \
7417 + builtin_define ("__AVR32_HAS_RMW__"); \
7418 + if (TARGET_BRANCH_PRED) \
7419 + builtin_define ("__AVR32_HAS_BRANCH_PRED__"); \
7420 + } \
7421 + while (0)
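
The TARGET_CPU_CPP_BUILTINS block above is what makes macros such as __AVR32__, __AVR32_HAS_SIMD__ and __AVR32_AVR32A__ visible to the preprocessor. A minimal, host-compilable sketch of how source code can feature-test them (the function name and the messages are illustrative, not taken from the port):

/* Feature-testing the macros defined by TARGET_CPU_CPP_BUILTINS above.
   On a non-AVR32 host none of them are defined, so the fallbacks run. */
#include <stdio.h>

static const char *avr32_variant (void)
{
#if defined (__AVR32_AVR32A__)
  return "AVR32A microarchitecture";
#elif defined (__AVR32_AVR32B__)
  return "AVR32B microarchitecture";
#else
  return "not an AVR32 target";
#endif
}

int main (void)
{
#ifdef __AVR32_HAS_SIMD__
  puts ("SIMD instructions available");
#endif
#ifdef __AVR32_HAS_DSP__
  puts ("DSP instructions available");
#endif
  puts (avr32_variant ());
  return 0;
}
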
7422 diff -Nur gcc-4.1.2/gcc/config/avr32/avr32.h gcc-4.1.2-owrt/gcc/config/avr32/avr32.h
7423 --- gcc-4.1.2/gcc/config/avr32/avr32.h 1970-01-01 01:00:00.000000000 +0100
7424 +++ gcc-4.1.2-owrt/gcc/config/avr32/avr32.h 2007-05-24 12:03:28.000000000 +0200
7425 @@ -0,0 +1,3322 @@
7426 +/*
7427 + Definitions of target machine for AVR32.
7428 + Copyright 2003-2006 Atmel Corporation.
7429 +
7430 + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
7431 + Initial porting by Anders Ødland.
7432 +
7433 + This file is part of GCC.
7434 +
7435 + This program is free software; you can redistribute it and/or modify
7436 + it under the terms of the GNU General Public License as published by
7437 + the Free Software Foundation; either version 2 of the License, or
7438 + (at your option) any later version.
7439 +
7440 + This program is distributed in the hope that it will be useful,
7441 + but WITHOUT ANY WARRANTY; without even the implied warranty of
7442 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
7443 + GNU General Public License for more details.
7444 +
7445 + You should have received a copy of the GNU General Public License
7446 + along with this program; if not, write to the Free Software
7447 + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
7448 +
7449 +#ifndef GCC_AVR32_H
7450 +#define GCC_AVR32_H
7451 +
7452 +
7453 +#ifndef OBJECT_FORMAT_ELF
7454 +#error avr32.h included before elfos.h
7455 +#endif
7456 +
7457 +#ifndef LOCAL_LABEL_PREFIX
7458 +#define LOCAL_LABEL_PREFIX "."
7459 +#endif
7460 +
7461 +#ifndef SUBTARGET_CPP_SPEC
7462 +#define SUBTARGET_CPP_SPEC "-D__ELF__"
7463 +#endif
7464 +
7465 +
7466 +extern struct rtx_def *avr32_compare_op0;
7467 +extern struct rtx_def *avr32_compare_op1;
7468 +
7469 +
7470 +extern struct rtx_def *avr32_acc_cache;
7471 +
7472 +/* cache instruction op5 codes */
7473 +#define AVR32_CACHE_INVALIDATE_ICACHE 1
7474 +
7475 +/* These bits describe the different types of function supported
7476 + by the AVR32 backend. They are exclusive, i.e. a function cannot be both a
7477 + normal function and an interworked function, for example. Knowing the
7478 + type of a function is important for determining its prologue and
7479 + epilogue sequences.
7480 + Note value 7 is currently unassigned. Also note that the interrupt
7481 + function types all have bit 2 set, so that they can be tested for easily.
7482 + Note that 0 is deliberately chosen for AVR32_FT_UNKNOWN so that when the
7483 + machine_function structure is initialized (to zero) func_type will
7484 + default to unknown. This will force the first use of avr32_current_func_type
7485 + to call avr32_compute_func_type. */
7486 +#define AVR32_FT_UNKNOWN 0 /* Type has not yet been determined.
7487 + */
7488 +#define AVR32_FT_NORMAL 1 /* Your normal, straightforward
7489 + function. */
7490 +#define AVR32_FT_ACALL 2 /* An acall function. */
7491 +#define AVR32_FT_EXCEPTION_HANDLER 3 /* A C++ exception handler. */
7492 +#define AVR32_FT_ISR_FULL 4 /* A fully shadowed interrupt mode. */
7493 +#define AVR32_FT_ISR_HALF 5 /* A half shadowed interrupt mode. */
7494 +#define AVR32_FT_ISR_NONE 6 /* No shadow registers. */
7495 +
7496 +#define AVR32_FT_TYPE_MASK ((1 << 3) - 1)
7497 +
7498 +/* In addition functions can have several type modifiers,
7499 + outlined by these bit masks: */
7500 +#define AVR32_FT_INTERRUPT (1 << 2) /* Note overlap with FT_ISR
7501 + and above. */
7502 +#define AVR32_FT_NAKED (1 << 3) /* No prologue or epilogue. */
7503 +#define AVR32_FT_VOLATILE (1 << 4) /* Does not return. */
7504 +#define AVR32_FT_NESTED (1 << 5) /* Embedded inside another
7505 + func. */
7506 +
7507 +/* Some macros to test these flags. */
7508 +#define AVR32_FUNC_TYPE(t) (t & AVR32_FT_TYPE_MASK)
7509 +#define IS_INTERRUPT(t) (t & AVR32_FT_INTERRUPT)
7510 +#define IS_VOLATILE(t) (t & AVR32_FT_VOLATILE)
7511 +#define IS_NAKED(t) (t & AVR32_FT_NAKED)
7512 +#define IS_NESTED(t) (t & AVR32_FT_NESTED)
7513 +
7514 +
7515 +typedef struct minipool_labels
7516 +GTY ((chain_next ("%h.next"), chain_prev ("%h.prev")))
7517 +{
7518 + rtx label;
7519 + struct minipool_labels *prev;
7520 + struct minipool_labels *next;
7521 +} minipool_labels;
7522 +
7523 +/* A C structure for machine-specific, per-function data.
7524 + This is added to the cfun structure. */
7525 +
7526 +typedef struct machine_function
7527 +GTY (())
7528 +{
7529 + /* Records the type of the current function. */
7530 + unsigned long func_type;
7531 + /* List of minipool labels, used for checking if a code label is valid in a
7532 + memory expression */
7533 + minipool_labels *minipool_label_head;
7534 + minipool_labels *minipool_label_tail;
7535 +} machine_function;
7536 +
7537 +/* Initialize data used by insn expanders. This is called from insn_emit,
7538 + once for every function before code is generated. */
7539 +#define INIT_EXPANDERS avr32_init_expanders ()
7540 +
7541 +/******************************************************************************
7542 + * SPECS
7543 + *****************************************************************************/
7544 +
7545 +#ifndef ASM_SPEC
7546 +#define ASM_SPEC "%{fpic:--pic} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{march=*:-march=%*} %{mpart=*:-mpart=%*}"
7547 +#endif
7548 +
7549 +#ifndef MULTILIB_DEFAULTS
7550 +#define MULTILIB_DEFAULTS { "march=ap" }
7551 +#endif
7552 +
7553 +/******************************************************************************
7554 + * Run-time Target Specification
7555 + *****************************************************************************/
7556 +#ifndef TARGET_VERSION
7557 +#define TARGET_VERSION fprintf(stderr, " (AVR32, GNU assembler syntax)");
7558 +#endif
7559 +
7560 +/* Part types. Keep this in sync with the order of avr32_part_types in avr32.c*/
7561 +enum part_type
7562 +{
7563 + PART_TYPE_AVR32_NONE,
7564 + PART_TYPE_AVR32_AP7000,
7565 + PART_TYPE_AVR32_AP7010,
7566 + PART_TYPE_AVR32_AP7020,
7567 + PART_TYPE_AVR32_UC3A0256,
7568 + PART_TYPE_AVR32_UC3A0512,
7569 + PART_TYPE_AVR32_UC3A1128,
7570 + PART_TYPE_AVR32_UC3A1256,
7571 + PART_TYPE_AVR32_UC3A1512
7572 +};
7573 +
7574 +/* Microarchitectures. */
7575 +enum microarchitecture_type
7576 +{
7577 + UARCH_TYPE_AVR32A,
7578 + UARCH_TYPE_AVR32B
7579 +};
7580 +
7581 +/* Architecture types, which specify the pipeline.
7582 + Keep this in sync with avr32_arch_types in avr32.c */
7583 +enum architecture_type
7584 +{
7585 + ARCH_TYPE_AVR32_AP,
7586 + ARCH_TYPE_AVR32_UC
7587 +};
7588 +
7589 +/* Flag specifying if the cpu has support for DSP instructions.*/
7590 +#define FLAG_AVR32_HAS_DSP (1 << 0)
7591 +/* Flag specifying if the cpu has support for Read-Modify-Write
7592 + instructions.*/
7593 +#define FLAG_AVR32_HAS_RMW (1 << 1)
7594 +/* Flag specifying if the cpu has support for SIMD instructions. */
7595 +#define FLAG_AVR32_HAS_SIMD (1 << 2)
7596 +/* Flag specifying if the cpu has support for unaligned memory word access. */
7597 +#define FLAG_AVR32_HAS_UNALIGNED_WORD (1 << 3)
7598 +/* Flag specifying if the cpu has support for branch prediction. */
7599 +#define FLAG_AVR32_HAS_BRANCH_PRED (1 << 4)
7600 +
7601 +/* Structure for holding information about different avr32 CPUs/parts */
7602 +struct part_type_s
7603 +{
7604 + const char *const name;
7605 + enum part_type part_type;
7606 + enum architecture_type arch_type;
7607 + /* Must lie outside user's namespace. NULL == no macro. */
7608 + const char *const macro;
7609 +};
7610 +
7611 +/* Structure for holding information about different avr32 pipeline
7612 + architectures. */
7613 +struct arch_type_s
7614 +{
7615 + const char *const name;
7616 + enum architecture_type arch_type;
7617 + enum microarchitecture_type uarch_type;
7618 + const unsigned long feature_flags;
7619 + /* Must lie outside user's namespace. NULL == no macro. */
7620 + const char *const macro;
7621 +};
7622 +
7623 +extern const struct part_type_s *avr32_part;
7624 +extern const struct arch_type_s *avr32_arch;
7625 +
7626 +#define TARGET_SIMD (avr32_arch->feature_flags & FLAG_AVR32_HAS_SIMD)
7627 +#define TARGET_DSP (avr32_arch->feature_flags & FLAG_AVR32_HAS_DSP)
7628 +#define TARGET_RMW (avr32_arch->feature_flags & FLAG_AVR32_HAS_RMW)
7629 +#define TARGET_UNALIGNED_WORD (avr32_arch->feature_flags & FLAG_AVR32_HAS_UNALIGNED_WORD)
7630 +#define TARGET_BRANCH_PRED (avr32_arch->feature_flags & FLAG_AVR32_HAS_BRANCH_PRED)
7631 +
7632 +#define CAN_DEBUG_WITHOUT_FP
7633 +
7634 +/******************************************************************************
7635 + * Storage Layout
7636 + *****************************************************************************/
7637 +
7638 +/*
7639 +Define this macro to have the value 1 if the most significant bit in a
7640 +byte has the lowest number; otherwise define it to have the value zero.
7641 +This means that bit-field instructions count from the most significant
7642 +bit. If the machine has no bit-field instructions, then this must still
7643 +be defined, but it doesn't matter which value it is defined to. This
7644 +macro need not be a constant.
7645 +
7646 +This macro does not affect the way structure fields are packed into
7647 +bytes or words; that is controlled by BYTES_BIG_ENDIAN.
7648 +*/
7649 +#define BITS_BIG_ENDIAN 0
7650 +
7651 +/*
7652 +Define this macro to have the value 1 if the most significant byte in a
7653 +word has the lowest number. This macro need not be a constant.
7654 +*/
7655 +/*
7656 + Data is stored in a big-endian way.
7657 +*/
7658 +#define BYTES_BIG_ENDIAN 1
7659 +
7660 +/*
7661 +Define this macro to have the value 1 if, in a multiword object, the
7662 +most significant word has the lowest number. This applies to both
7663 +memory locations and registers; GCC fundamentally assumes that the
7664 +order of words in memory is the same as the order in registers. This
7665 +macro need not be a constant.
7666 +*/
7667 +/*
7668 + Data is stored in a big-endian way.
7669 +*/
7670 +#define WORDS_BIG_ENDIAN 1
7671 +
7672 +/*
7673 +Define this macro if WORDS_BIG_ENDIAN is not constant. This must be a
7674 +constant value with the same meaning as WORDS_BIG_ENDIAN, which will be
7675 +used only when compiling libgcc2.c. Typically the value will be set
7676 +based on preprocessor defines.
7677 +*/
7678 +#define LIBGCC2_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
7679 +
7680 +/*
7681 +Define this macro to have the value 1 if DFmode, XFmode or
7682 +TFmode floating point numbers are stored in memory with the word
7683 +containing the sign bit at the lowest address; otherwise define it to
7684 +have the value 0. This macro need not be a constant.
7685 +
7686 +You need not define this macro if the ordering is the same as for
7687 +multi-word integers.
7688 +*/
7689 +/* #define FLOAT_WORDS_BIG_ENDIAN 1 */
7690 +
7691 +/*
7692 +Define this macro to be the number of bits in an addressable storage
7693 +unit (byte); normally 8.
7694 +*/
7695 +#define BITS_PER_UNIT 8
7696 +
7697 +/*
7698 +Number of bits in a word; normally 32.
7699 +*/
7700 +#define BITS_PER_WORD 32
7701 +
7702 +/*
7703 +Maximum number of bits in a word. If this is undefined, the default is
7704 +BITS_PER_WORD. Otherwise, it is the constant value that is the
7705 +largest value that BITS_PER_WORD can have at run-time.
7706 +*/
7707 +/* MAX_BITS_PER_WORD not defined*/
7708 +
7709 +/*
7710 +Number of storage units in a word; normally 4.
7711 +*/
7712 +#define UNITS_PER_WORD 4
7713 +
7714 +/*
7715 +Minimum number of units in a word. If this is undefined, the default is
7716 +UNITS_PER_WORD. Otherwise, it is the constant value that is the
7717 +smallest value that UNITS_PER_WORD can have at run-time.
7718 +*/
7719 +/* MIN_UNITS_PER_WORD not defined */
7720 +
7721 +/*
7722 +Width of a pointer, in bits. You must specify a value no wider than the
7723 +width of Pmode. If it is not equal to the width of Pmode,
7724 +you must define POINTERS_EXTEND_UNSIGNED.
7725 +*/
7726 +#define POINTER_SIZE 32
7727 +
7728 +/*
7729 +A C expression whose value is greater than zero if pointers that need to be
7730 +extended from being POINTER_SIZE bits wide to Pmode are to
7731 +be zero-extended and zero if they are to be sign-extended. If the value
7732 +is less than zero then there must be a "ptr_extend" instruction that
7733 +extends a pointer from POINTER_SIZE to Pmode.
7734 +
7735 +You need not define this macro if the POINTER_SIZE is equal
7736 +to the width of Pmode.
7737 +*/
7738 +/* #define POINTERS_EXTEND_UNSIGNED */
7739 +
7740 +/*
7741 +A macro to update M and UNSIGNEDP when an object whose type
7742 +is TYPE and which has the specified mode and signedness is to be
7743 +stored in a register. This macro is only called when TYPE is a
7744 +scalar type.
7745 +
7746 +On most RISC machines, which only have operations that operate on a full
7747 +register, define this macro to set M to word_mode if
7748 +M is an integer mode narrower than BITS_PER_WORD. In most
7749 +cases, only integer modes should be widened because wider-precision
7750 +floating-point operations are usually more expensive than their narrower
7751 +counterparts.
7752 +
7753 +For most machines, the macro definition does not change UNSIGNEDP.
7754 +However, some machines have instructions that preferentially handle
7755 +either signed or unsigned quantities of certain modes. For example, on
7756 +the DEC Alpha, 32-bit loads from memory and 32-bit add instructions
7757 +sign-extend the result to 64 bits. On such machines, set
7758 +UNSIGNEDP according to which kind of extension is more efficient.
7759 +
7760 +Do not define this macro if it would never modify M.
7761 +*/
7762 +#define PROMOTE_MODE(M, UNSIGNEDP, TYPE) \
7763 + do \
7764 + { \
7765 + if (GET_MODE_CLASS (M) == MODE_INT \
7766 + && GET_MODE_SIZE (M) < 4) \
7767 + { \
7768 + (M) = SImode; \
7769 + } \
7770 + } \
7771 + while (0)
7772 +
7773 +/* Define if operations between registers always perform the operation
7774 + on the full register even if a narrower mode is specified. */
7775 +#define WORD_REGISTER_OPERATIONS
7776 +
7777 +/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
7778 + will either zero-extend or sign-extend. The value of this macro should
7779 + be the code that says which one of the two operations is implicitly
7780 + done, UNKNOWN if not known. */
7781 +#define LOAD_EXTEND_OP(MODE) \
7782 + (((MODE) == QImode) ? ZERO_EXTEND \
7783 + : ((MODE) == HImode) ? SIGN_EXTEND : UNKNOWN)
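
LOAD_EXTEND_OP above records that byte (QImode) loads are implicitly zero-extended while halfword (HImode) loads are sign-extended. A small host-side sketch of what those two choices mean for values widened from memory (ordinary C that only mirrors the semantics; it is not target code):

/* Mirrors the extension rules declared by LOAD_EXTEND_OP: a 0x80 byte
   widens to 128 under zero extension, while a -0x8000 halfword stays
   -32768 under sign extension. */
#include <stdio.h>

int main (void)
{
  unsigned char byte = 0x80;  /* widened the way a QImode load would be */
  short halfword = -0x8000;   /* widened the way an HImode load would be */

  printf ("zero-extended byte:     %d\n", (int) byte);     /* prints 128 */
  printf ("sign-extended halfword: %d\n", (int) halfword); /* prints -32768 */
  return 0;
}
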
7784 +
7785 +
7786 +/*
7787 +Define this macro if the promotion described by PROMOTE_MODE
7788 +should only be performed for outgoing function arguments or
7789 +function return values, as specified by PROMOTE_FUNCTION_ARGS
7790 +and PROMOTE_FUNCTION_RETURN, respectively.
7791 +*/
7792 +/* #define PROMOTE_FOR_CALL_ONLY */
7793 +
7794 +/*
7795 +Normal alignment required for function parameters on the stack, in
7796 +bits. All stack parameters receive at least this much alignment
7797 +regardless of data type. On most machines, this is the same as the
7798 +size of an integer.
7799 +*/
7800 +#define PARM_BOUNDARY 32
7801 +
7802 +/*
7803 +Define this macro to the minimum alignment enforced by hardware for the
7804 +stack pointer on this machine. The definition is a C expression for the
7805 +desired alignment (measured in bits). This value is used as a default
7806 +if PREFERRED_STACK_BOUNDARY is not defined. On most machines,
7807 +this should be the same as PARM_BOUNDARY.
7808 +*/
7809 +#define STACK_BOUNDARY 32
7810 +
7811 +/*
7812 +Define this macro if you wish to preserve a certain alignment for the
7813 +stack pointer, greater than what the hardware enforces. The definition
7814 +is a C expression for the desired alignment (measured in bits). This
7815 +macro must evaluate to a value equal to or larger than
7816 +STACK_BOUNDARY.
7817 +*/
7818 +#define PREFERRED_STACK_BOUNDARY (TARGET_FORCE_DOUBLE_ALIGN ? 64 : 32 )
7819 +
7820 +/*
7821 +Alignment required for a function entry point, in bits.
7822 +*/
7823 +#define FUNCTION_BOUNDARY 16
7824 +
7825 +/*
7826 +Biggest alignment that any data type can require on this machine, in bits.
7827 +*/
7828 +#define BIGGEST_ALIGNMENT (TARGET_FORCE_DOUBLE_ALIGN ? 64 : 32 )
7829 +
7830 +/*
7831 +If defined, the smallest alignment, in bits, that can be given to an
7832 +object that can be referenced in one operation, without disturbing any
7833 +nearby object. Normally, this is BITS_PER_UNIT, but may be larger
7834 +on machines that don't have byte or half-word store operations.
7835 +*/
7836 +#define MINIMUM_ATOMIC_ALIGNMENT BITS_PER_UNIT
7837 +
7838 +
7839 +/*
7840 +An integer expression for the size in bits of the largest integer machine mode that
7841 +should actually be used. All integer machine modes of this size or smaller can be
7842 +used for structures and unions with the appropriate sizes. If this macro is undefined,
7843 +GET_MODE_BITSIZE (DImode) is assumed.*/
7844 +#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
7845 +
7846 +
7847 +/*
7848 +If defined, a C expression to compute the alignment given to a constant
7849 +that is being placed in memory. CONSTANT is the constant and
7850 +BASIC_ALIGN is the alignment that the object would ordinarily
7851 +have. The value of this macro is used instead of that alignment to
7852 +align the object.
7853 +
7854 +If this macro is not defined, then BASIC_ALIGN is used.
7855 +
7856 +The typical use of this macro is to increase alignment for string
7857 +constants to be word aligned so that strcpy calls that copy
7858 +constants can be done inline.
7859 +*/
7860 +#define CONSTANT_ALIGNMENT(CONSTANT, BASIC_ALIGN) \
7861 + ((TREE_CODE(CONSTANT) == STRING_CST) ? BITS_PER_WORD : BASIC_ALIGN)
7862 +
7863 +/* Try to align string to a word. */
7864 +#define DATA_ALIGNMENT(TYPE, ALIGN) \
7865 + ({(TREE_CODE (TYPE) == ARRAY_TYPE \
7866 + && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
7867 + && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN));})
7868 +
7869 +/* Try to align local store strings to a word. */
7870 +#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
7871 + ({(TREE_CODE (TYPE) == ARRAY_TYPE \
7872 + && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
7873 + && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN));})
7874 +
7875 +/*
7876 +Define this macro to be the value 1 if instructions will fail to work
7877 +if given data not on the nominal alignment. If instructions will merely
7878 +go slower in that case, define this macro as 0.
7879 +*/
7880 +#define STRICT_ALIGNMENT 1
7881 +
7882 +/*
7883 +Define this if you wish to imitate the way many other C compilers handle
7884 +alignment of bit-fields and the structures that contain them.
7885 +
7886 +The behavior is that the type written for a bit-field (int,
7887 +short, or other integer type) imposes an alignment for the
7888 +entire structure, as if the structure really did contain an ordinary
7889 +field of that type. In addition, the bit-field is placed within the
7890 +structure so that it would fit within such a field, not crossing a
7891 +boundary for it.
7892 +
7893 +Thus, on most machines, a bit-field whose type is written as int
7894 +would not cross a four-byte boundary, and would force four-byte
7895 +alignment for the whole structure. (The alignment used may not be four
7896 +bytes; it is controlled by the other alignment parameters.)
7897 +
7898 +If the macro is defined, its definition should be a C expression;
7899 +a nonzero value for the expression enables this behavior.
7900 +
7901 +Note that if this macro is not defined, or its value is zero, some
7902 +bit-fields may cross more than one alignment boundary. The compiler can
7903 +support such references if there are insv, extv, and
7904 +extzv insns that can directly reference memory.
7905 +
7906 +The other known way of making bit-fields work is to define
7907 +STRUCTURE_SIZE_BOUNDARY as large as BIGGEST_ALIGNMENT.
7908 +Then every structure can be accessed with fullwords.
7909 +
7910 +Unless the machine has bit-field instructions or you define
7911 +STRUCTURE_SIZE_BOUNDARY that way, you must define
7912 +PCC_BITFIELD_TYPE_MATTERS to have a nonzero value.
7913 +
7914 +If your aim is to make GCC use the same conventions for laying out
7915 +bit-fields as are used by another compiler, here is how to investigate
7916 +what the other compiler does. Compile and run this program:
7917 +
7918 +struct foo1
7919 +{
7920 + char x;
7921 + char :0;
7922 + char y;
7923 +};
7924 +
7925 +struct foo2
7926 +{
7927 + char x;
7928 + int :0;
7929 + char y;
7930 +};
7931 +
7932 +main ()
7933 +{
7934 + printf ("Size of foo1 is %d\n",
7935 + sizeof (struct foo1));
7936 + printf ("Size of foo2 is %d\n",
7937 + sizeof (struct foo2));
7938 + exit (0);
7939 +}
7940 +
7941 +If this prints 2 and 5, then the compiler's behavior is what you would
7942 +get from PCC_BITFIELD_TYPE_MATTERS.
7943 +*/
7944 +#define PCC_BITFIELD_TYPE_MATTERS 1
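
The comment above quotes GCC's probe program for checking another compiler's bit-field layout; the version below only adds the missing includes and a standard main signature so it actually compiles:

/* Compilable version of the probe program quoted in the comment above.
   Per that comment, a compiler following PCC_BITFIELD_TYPE_MATTERS
   prints 2 and 5. */
#include <stdio.h>
#include <stdlib.h>

struct foo1
{
  char x;
  char :0;
  char y;
};

struct foo2
{
  char x;
  int :0;
  char y;
};

int main (void)
{
  printf ("Size of foo1 is %d\n", (int) sizeof (struct foo1));
  printf ("Size of foo2 is %d\n", (int) sizeof (struct foo2));
  exit (0);
}
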
7945 +
7946 +
7947 +/******************************************************************************
7948 + * Layout of Source Language Data Types
7949 + *****************************************************************************/
7950 +
7951 +/*
7952 +A C expression for the size in bits of the type int on the
7953 +target machine. If you don't define this, the default is one word.
7954 +*/
7955 +#define INT_TYPE_SIZE 32
7956 +
7957 +/*
7958 +A C expression for the size in bits of the type short on the
7959 +target machine. If you don't define this, the default is half a word. (If
7960 +this would be less than one storage unit, it is rounded up to one unit.)
7961 +*/
7962 +#define SHORT_TYPE_SIZE 16
7963 +
7964 +/*
7965 +A C expression for the size in bits of the type long on the
7966 +target machine. If you don't define this, the default is one word.
7967 +*/
7968 +#define LONG_TYPE_SIZE 32
7969 +
7970 +
7971 +/*
7972 +A C expression for the size in bits of the type long long on the
7973 +target machine. If you don't define this, the default is two
7974 +words. If you want to support GNU Ada on your machine, the value of this
7975 +macro must be at least 64.
7976 +*/
7977 +#define LONG_LONG_TYPE_SIZE 64
7978 +
7979 +/*
7980 +A C expression for the size in bits of the type char on the
7981 +target machine. If you don't define this, the default is
7982 +BITS_PER_UNIT.
7983 +*/
7984 +#define CHAR_TYPE_SIZE 8
7985 +
7986 +
7987 +/*
7988 +A C expression for the size in bits of the C++ type bool and
7989 +C99 type _Bool on the target machine. If you don't define
7990 +this, and you probably shouldn't, the default is CHAR_TYPE_SIZE.
7991 +*/
7992 +#define BOOL_TYPE_SIZE 8
7993 +
7994 +
7995 +/*
7996 +An expression whose value is 1 or 0, according to whether the type
7997 +char should be signed or unsigned by default. The user can
7998 +always override this default with the options -fsigned-char
7999 +and -funsigned-char.
8000 +*/
8001 +/* We are using unsigned char */
8002 +#define DEFAULT_SIGNED_CHAR 0
8003 +
8004 +
8005 +/*
8006 +A C expression for a string describing the name of the data type to use
8007 +for size values. The typedef name size_t is defined using the
8008 +contents of the string.
8009 +
8010 +The string can contain more than one keyword. If so, separate them with
8011 +spaces, and write first any length keyword, then unsigned if
8012 +appropriate, and finally int. The string must exactly match one
8013 +of the data type names defined in the function
8014 +init_decl_processing in the file c-decl.c. You may not
8015 +omit int or change the order - that would cause the compiler to
8016 +crash on startup.
8017 +
8018 +If you don't define this macro, the default is "long unsigned int".
8019 +*/
8020 +#define SIZE_TYPE "long unsigned int"
8021 +
8022 +/*
8023 +A C expression for a string describing the name of the data type to use
8024 +for the result of subtracting two pointers. The typedef name
8025 +ptrdiff_t is defined using the contents of the string. See
8026 +SIZE_TYPE above for more information.
8027 +
8028 +If you don't define this macro, the default is "long int".
8029 +*/
8030 +#define PTRDIFF_TYPE "long int"
8031 +
8032 +
8033 +/*
8034 +A C expression for the size in bits of the data type for wide
8035 +characters. This is used in cpp, which cannot make use of
8036 +WCHAR_TYPE.
8037 +*/
8038 +#define WCHAR_TYPE_SIZE 32
8039 +
8040 +
8041 +/*
8042 +A C expression for a string describing the name of the data type to
8043 +use for wide characters passed to printf and returned from
8044 +getwc. The typedef name wint_t is defined using the
8045 +contents of the string. See SIZE_TYPE above for more
8046 +information.
8047 +
8048 +If you don't define this macro, the default is "unsigned int".
8049 +*/
8050 +#define WINT_TYPE "unsigned int"
8051 +
8052 +/*
8053 +A C expression for a string describing the name of the data type that
8054 +can represent any value of any standard or extended signed integer type.
8055 +The typedef name intmax_t is defined using the contents of the
8056 +string. See SIZE_TYPE above for more information.
8057 +
8058 +If you don't define this macro, the default is the first of
8059 +"int", "long int", or "long long int" that has as
8060 +much precision as long long int.
8061 +*/
8062 +#define INTMAX_TYPE "long long int"
8063 +
8064 +/*
8065 +A C expression for a string describing the name of the data type that
8066 +can represent any value of any standard or extended unsigned integer
8067 +type. The typedef name uintmax_t is defined using the contents
8068 +of the string. See SIZE_TYPE above for more information.
8069 +
8070 +If you don't define this macro, the default is the first of
8071 +"unsigned int", "long unsigned int", or "long long unsigned int"
8072 +that has as much precision as long long unsigned int.
8073 +*/
8074 +#define UINTMAX_TYPE "long long unsigned int"
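
Taken together, the *_TYPE_SIZE macros above and POINTER_SIZE earlier in this file fix the basic C type widths for the target. A quick check program follows; the commented values are what these definitions imply when it is built with an avr32 cross-compiler, and a host build may of course print something else:

/* Type widths implied by the definitions above: short 16, int 32, long 32,
   long long 64, bool 8, wchar_t 32 and pointers 32 bits. */
#include <stdio.h>
#include <stdbool.h>
#include <wchar.h>

int main (void)
{
  printf ("short:     %d bytes\n", (int) sizeof (short));     /* 2 */
  printf ("int:       %d bytes\n", (int) sizeof (int));       /* 4 */
  printf ("long:      %d bytes\n", (int) sizeof (long));      /* 4 */
  printf ("long long: %d bytes\n", (int) sizeof (long long)); /* 8 */
  printf ("bool:      %d bytes\n", (int) sizeof (bool));      /* 1 */
  printf ("wchar_t:   %d bytes\n", (int) sizeof (wchar_t));   /* 4 */
  printf ("void *:    %d bytes\n", (int) sizeof (void *));    /* 4 */
  return 0;
}
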
8075 +
8076 +
8077 +/******************************************************************************
8078 + * Register Usage
8079 + *****************************************************************************/
8080 +
8081 +/* Convert from gcc internal register number to register number
8082 + used in assembly code */
8083 +#define ASM_REGNUM(reg) (LAST_REGNUM - (reg))
8084 +#define ASM_FP_REGNUM(reg) (LAST_FP_REGNUM - (reg))
8085 +
8086 +/* Convert from the register number used in assembly to the gcc
8087 + internal register number */
8088 +#define INTERNAL_REGNUM(reg) (LAST_REGNUM - (reg))
8089 +#define INTERNAL_FP_REGNUM(reg) (LAST_FP_REGNUM - (reg))
8090 +
8091 +/** Basic Characteristics of Registers **/
8092 +
8093 +/*
8094 +Number of hardware registers known to the compiler. They receive
8095 +numbers 0 through FIRST_PSEUDO_REGISTER-1; thus, the first
8096 +pseudo register's number really is assigned the number
8097 +FIRST_PSEUDO_REGISTER.
8098 +*/
8099 +#define FIRST_PSEUDO_REGISTER (LAST_FP_REGNUM + 1)
8100 +
8101 +#define FIRST_REGNUM 0
8102 +#define LAST_REGNUM 15
8103 +#define NUM_FP_REGS 16
8104 +#define FIRST_FP_REGNUM 16
8105 +#define LAST_FP_REGNUM (16+NUM_FP_REGS-1)
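
ASM_REGNUM and INTERNAL_REGNUM above simply reverse the numbering: with LAST_REGNUM equal to 15, internal register 0 is r15 (PC), internal 1 is r14 (LR), internal 2 is r13 (SP) and internal 15 is r0, which is also the order used by FIXED_REGISTERS and CALL_USED_REGISTERS below. A standalone sketch that reproduces the mapping (the constants are copied from the definitions above; the program itself is illustrative):

/* Reproduces the reversed register numbering used by the port:
   INTERNAL_REGNUM(reg) = LAST_REGNUM - reg, and the mapping is its own
   inverse, so ASM_REGNUM (INTERNAL_REGNUM (n)) == n. */
#include <stdio.h>

#define LAST_REGNUM 15
#define ASM_REGNUM(reg)      (LAST_REGNUM - (reg))
#define INTERNAL_REGNUM(reg) (LAST_REGNUM - (reg))

int main (void)
{
  int asm_reg;
  for (asm_reg = LAST_REGNUM; asm_reg >= 0; asm_reg--)
    printf ("r%-2d <-> internal regno %d\n", asm_reg, INTERNAL_REGNUM (asm_reg));
  return 0;
}
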
8106 +
8107 +/*
8108 +An initializer that says which registers are used for fixed purposes
8109 +all throughout the compiled code and are therefore not available for
8110 +general allocation. These would include the stack pointer, the frame
8111 +pointer (except on machines where that can be used as a general
8112 +register when no frame pointer is needed), the program counter on
8113 +machines where that is considered one of the addressable registers,
8114 +and any other numbered register with a standard use.
8115 +
8116 +This information is expressed as a sequence of numbers, separated by
8117 +commas and surrounded by braces. The nth number is 1 if
8118 +register n is fixed, 0 otherwise.
8119 +
8120 +The table initialized from this macro, and the table initialized by
8121 +the following one, may be overridden at run time either automatically,
8122 +by the actions of the macro CONDITIONAL_REGISTER_USAGE, or by
8123 +the user with the command options -ffixed-[reg],
8124 +-fcall-used-[reg] and -fcall-saved-[reg].
8125 +*/
8126 +
8127 +/* The internal gcc register numbers are reversed
8128 + compared to the real register numbers since
8129 + gcc expects data types stored over multiple
8130 + registers in the register file to be big endian
8131 + if the memory layout is big endian. But this
8132 + is not the case for avr32 so we fake a big
8133 + endian register file. */
8134 +
8135 +#define FIXED_REGISTERS { \
8136 + 1, /* Program Counter */ \
8137 + 0, /* Link Register */ \
8138 + 1, /* Stack Pointer */ \
8139 + 0, /* r12 */ \
8140 + 0, /* r11 */ \
8141 + 0, /* r10 */ \
8142 + 0, /* r9 */ \
8143 + 0, /* r8 */ \
8144 + 0, /* r7 */ \
8145 + 0, /* r6 */ \
8146 + 0, /* r5 */ \
8147 + 0, /* r4 */ \
8148 + 0, /* r3 */ \
8149 + 0, /* r2 */ \
8150 + 0, /* r1 */ \
8151 + 0, /* r0 */ \
8152 + 0, /* f15 */ \
8153 + 0, /* f14 */ \
8154 + 0, /* f13 */ \
8155 + 0, /* f12 */ \
8156 + 0, /* f11 */ \
8157 + 0, /* f10 */ \
8158 + 0, /* f9 */ \
8159 + 0, /* f8 */ \
8160 + 0, /* f7 */ \
8161 + 0, /* f6 */ \
8162 + 0, /* f5 */ \
8163 + 0, /* f4 */ \
8164 + 0, /* f3 */ \
8165 + 0, /* f2*/ \
8166 + 0, /* f1 */ \
8167 + 0 /* f0 */ \
8168 +}
8169 +
8170 +/*
8171 +Like FIXED_REGISTERS but has 1 for each register that is
8172 +clobbered (in general) by function calls as well as for fixed
8173 +registers. This macro therefore identifies the registers that are not
8174 +available for general allocation of values that must live across
8175 +function calls.
8176 +
8177 +If a register has 0 in CALL_USED_REGISTERS, the compiler
8178 +automatically saves it on function entry and restores it on function
8179 +exit, if the register is used within the function.
8180 +*/
8181 +#define CALL_USED_REGISTERS { \
8182 + 1, /* Program Counter */ \
8183 + 0, /* Link Register */ \
8184 + 1, /* Stack Pointer */ \
8185 + 1, /* r12 */ \
8186 + 1, /* r11 */ \
8187 + 1, /* r10 */ \
8188 + 1, /* r9 */ \
8189 + 1, /* r8 */ \
8190 + 0, /* r7 */ \
8191 + 0, /* r6 */ \
8192 + 0, /* r5 */ \
8193 + 0, /* r4 */ \
8194 + 0, /* r3 */ \
8195 + 0, /* r2 */ \
8196 + 0, /* r1 */ \
8197 + 0, /* r0 */ \
8198 + 1, /* f15 */ \
8199 + 1, /* f14 */ \
8200 + 1, /* f13 */ \
8201 + 1, /* f12 */ \
8202 + 1, /* f11 */ \
8203 + 1, /* f10 */ \
8204 + 1, /* f9 */ \
8205 + 1, /* f8 */ \
8206 + 0, /* f7 */ \
8207 + 0, /* f6 */ \
8208 + 0, /* f5 */ \
8209 + 0, /* f4 */ \
8210 + 0, /* f3 */ \
8211 + 0, /* f2*/ \
8212 + 0, /* f1*/ \
8213 + 0, /* f0 */ \
8214 +}
8215 +
8216 +/* Interrupt functions can only use registers that have already been
8217 + saved by the prologue, even if they would normally be
8218 + call-clobbered. */
8219 +#define HARD_REGNO_RENAME_OK(SRC, DST) \
8220 + (! IS_INTERRUPT (cfun->machine->func_type) || \
8221 + regs_ever_live[DST])
8222 +
8223 +
8224 +/*
8225 +Zero or more C statements that may conditionally modify five variables
8226 +fixed_regs, call_used_regs, global_regs,
8227 +reg_names, and reg_class_contents, to take into account
8228 +any dependence of these register sets on target flags. The first three
8229 +of these are of type char [] (interpreted as Boolean vectors).
8230 +global_regs is a const char *[], and
8231 +reg_class_contents is a HARD_REG_SET. Before the macro is
8232 +called, fixed_regs, call_used_regs,
8233 +reg_class_contents, and reg_names have been initialized
8234 +from FIXED_REGISTERS, CALL_USED_REGISTERS,
8235 +REG_CLASS_CONTENTS, and REGISTER_NAMES, respectively.
8236 +global_regs has been cleared, and any -ffixed-[reg],
8237 +-fcall-used-[reg] and -fcall-saved-[reg]
8238 +command options have been applied.
8239 +
8240 +You need not define this macro if it has no work to do.
8241 +
8242 +If the usage of an entire class of registers depends on the target
8243 +flags, you may indicate this to GCC by using this macro to modify
8244 +fixed_regs and call_used_regs to 1 for each of the
8245 +registers in the classes which should not be used by GCC. Also define
8246 +the macro REG_CLASS_FROM_LETTER to return NO_REGS if it
8247 +is called with a letter for a class that shouldn't be used.
8248 +
8249 + (However, if this class is not included in GENERAL_REGS and all
8250 +of the insn patterns whose constraints permit this class are
8251 +controlled by target switches, then GCC will automatically avoid using
8252 +these registers when the target switches are opposed to them.)
8253 +*/
8254 +#define CONDITIONAL_REGISTER_USAGE \
8255 + do \
8256 + { \
8257 + int regno; \
8258 + \
8259 + if (TARGET_SOFT_FLOAT) \
8260 + { \
8261 + for (regno = FIRST_FP_REGNUM; \
8262 + regno <= LAST_FP_REGNUM; ++regno) \
8263 + fixed_regs[regno] = call_used_regs[regno] = 1; \
8264 + } \
8265 + if (flag_pic) \
8266 + { \
8267 + fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
8268 + call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
8269 + } \
8270 + } \
8271 + while (0)
8272 +
8273 +
8274 +/*
8275 +If the program counter has a register number, define this as that
8276 +register number. Otherwise, do not define it.
8277 +*/
8278 +
8279 +#define LAST_AVR32_REGNUM 16
8280 +
8281 +
8282 +/** Order of Allocation of Registers **/
8283 +
8284 +/*
8285 +If defined, an initializer for a vector of integers, containing the
8286 +numbers of hard registers in the order in which GCC should prefer
8287 +to use them (from most preferred to least).
8288 +
8289 +If this macro is not defined, registers are used lowest numbered first
8290 +(all else being equal).
8291 +
8292 +One use of this macro is on machines where the highest numbered
8293 +registers must always be saved and the save-multiple-registers
8294 +instruction supports only sequences of consecutive registers. On such
8295 +machines, define REG_ALLOC_ORDER to be an initializer that lists
8296 +the highest numbered allocable register first.
8297 +*/
8298 +#define REG_ALLOC_ORDER \
8299 +{ \
8300 + INTERNAL_REGNUM(8), \
8301 + INTERNAL_REGNUM(9), \
8302 + INTERNAL_REGNUM(10), \
8303 + INTERNAL_REGNUM(11), \
8304 + INTERNAL_REGNUM(12), \
8305 + LR_REGNUM, \
8306 + INTERNAL_REGNUM(7), \
8307 + INTERNAL_REGNUM(6), \
8308 + INTERNAL_REGNUM(5), \
8309 + INTERNAL_REGNUM(4), \
8310 + INTERNAL_REGNUM(3), \
8311 + INTERNAL_REGNUM(2), \
8312 + INTERNAL_REGNUM(1), \
8313 + INTERNAL_REGNUM(0), \
8314 + INTERNAL_FP_REGNUM(15), \
8315 + INTERNAL_FP_REGNUM(14), \
8316 + INTERNAL_FP_REGNUM(13), \
8317 + INTERNAL_FP_REGNUM(12), \
8318 + INTERNAL_FP_REGNUM(11), \
8319 + INTERNAL_FP_REGNUM(10), \
8320 + INTERNAL_FP_REGNUM(9), \
8321 + INTERNAL_FP_REGNUM(8), \
8322 + INTERNAL_FP_REGNUM(7), \
8323 + INTERNAL_FP_REGNUM(6), \
8324 + INTERNAL_FP_REGNUM(5), \
8325 + INTERNAL_FP_REGNUM(4), \
8326 + INTERNAL_FP_REGNUM(3), \
8327 + INTERNAL_FP_REGNUM(2), \
8328 + INTERNAL_FP_REGNUM(1), \
8329 + INTERNAL_FP_REGNUM(0), \
8330 + SP_REGNUM, \
8331 + PC_REGNUM \
8332 +}
8333 +
8334 +
8335 +/** How Values Fit in Registers **/
8336 +
8337 +/*
8338 +A C expression for the number of consecutive hard registers, starting
8339 +at register number REGNO, required to hold a value of mode
8340 +MODE.
8341 +
8342 +On a machine where all registers are exactly one word, a suitable
8343 +definition of this macro is
8344 +
8345 +#define HARD_REGNO_NREGS(REGNO, MODE) \
8346 + ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
8347 + / UNITS_PER_WORD)
8348 +*/
8349 +#define HARD_REGNO_NREGS(REGNO, MODE) \
8350 + ((unsigned int)((GET_MODE_SIZE(MODE) + UNITS_PER_WORD -1 ) / UNITS_PER_WORD))
8351 +
8352 +/*
8353 +A C expression that is nonzero if it is permissible to store a value
8354 +of mode MODE in hard register number REGNO (or in several
8355 +registers starting with that one). For a machine where all registers
8356 +are equivalent, a suitable definition is
8357 +
8358 + #define HARD_REGNO_MODE_OK(REGNO, MODE) 1
8359 +
8360 +You need not include code to check for the numbers of fixed registers,
8361 +because the allocation mechanism considers them to be always occupied.
8362 +
8363 +On some machines, double-precision values must be kept in even/odd
8364 +register pairs. You can implement that by defining this macro to reject
8365 +odd register numbers for such modes.
8366 +
8367 +The minimum requirement for a mode to be OK in a register is that the
8368 +mov[mode] instruction pattern support moves between the
8369 +register and other hard register in the same class and that moving a
8370 +value into the register and back out not alter it.
8371 +
8372 +Since the same instruction used to move word_mode will work for
8373 +all narrower integer modes, it is not necessary on any machine for
8374 +HARD_REGNO_MODE_OK to distinguish between these modes, provided
8375 +you define patterns movhi, etc., to take advantage of this. This
8376 +is useful because of the interaction between HARD_REGNO_MODE_OK
8377 +and MODES_TIEABLE_P; it is very desirable for all integer modes
8378 +to be tieable.
8379 +
8380 +Many machines have special registers for floating point arithmetic.
8381 +Often people assume that floating point machine modes are allowed only
8382 +in floating point registers. This is not true. Any registers that
8383 +can hold integers can safely hold a floating point machine
8384 +mode, whether or not floating arithmetic can be done on it in those
8385 +registers. Integer move instructions can be used to move the values.
8386 +
8387 +On some machines, though, the converse is true: fixed-point machine
8388 +modes may not go in floating registers. This is true if the floating
8389 +registers normalize any value stored in them, because storing a
8390 +non-floating value there would garble it. In this case,
8391 +HARD_REGNO_MODE_OK should reject fixed-point machine modes in
8392 +floating registers. But if the floating registers do not automatically
8393 +normalize, if you can store any bit pattern in one and retrieve it
8394 +unchanged without a trap, then any machine mode may go in a floating
8395 +register, so you can define this macro to say so.
8396 +
8397 +The primary significance of special floating registers is rather that
8398 +they are the registers acceptable in floating point arithmetic
8399 +instructions. However, this is of no concern to
8400 +HARD_REGNO_MODE_OK. You handle it by writing the proper
8401 +constraints for those instructions.
8402 +
8403 +On some machines, the floating registers are especially slow to access,
8404 +so that it is better to store a value in a stack frame than in such a
8405 +register if floating point arithmetic is not being done. As long as the
8406 +floating registers are not in class GENERAL_REGS, they will not
8407 +be used unless some pattern's constraint asks for one.
8408 +*/
8409 +#define HARD_REGNO_MODE_OK(REGNO, MODE) avr32_hard_regno_mode_ok(REGNO, MODE)
8410 +
8411 +/*
8412 +A C expression that is nonzero if a value of mode
8413 +MODE1 is accessible in mode MODE2 without copying.
8414 +
8415 +If HARD_REGNO_MODE_OK(R, MODE1) and
8416 +HARD_REGNO_MODE_OK(R, MODE2) are always the same for
8417 +any R, then MODES_TIEABLE_P(MODE1, MODE2)
8418 +should be nonzero. If they differ for any R, you should define
8419 +this macro to return zero unless some other mechanism ensures the
8420 +accessibility of the value in a narrower mode.
8421 +
8422 +You should define this macro to return nonzero in as many cases as
8423 +possible since doing so will allow GCC to perform better register
8424 +allocation.
8425 +*/
8426 +#define MODES_TIEABLE_P(MODE1, MODE2) \
8427 + (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
8428 +
8429 +
8430 +
8431 +/******************************************************************************
8432 + * Register Classes
8433 + *****************************************************************************/
8434 +
8435 +/*
8436 +An enumeral type that must be defined with all the register class names
8437 +as enumeral values. NO_REGS must be first. ALL_REGS
8438 +must be the last register class, followed by one more enumeral value,
8439 +LIM_REG_CLASSES, which is not a register class but rather
8440 +tells how many classes there are.
8441 +
8442 +Each register class has a number, which is the value of casting
8443 +the class name to type int. The number serves as an index
8444 +in many of the tables described below.
8445 +*/
8446 +enum reg_class
8447 +{
8448 + NO_REGS,
8449 + GENERAL_REGS,
8450 + FP_REGS,
8451 + ALL_REGS,
8452 + LIM_REG_CLASSES
8453 +};
8454 +
8455 +/*
8456 +The number of distinct register classes, defined as follows:
8457 + #define N_REG_CLASSES (int) LIM_REG_CLASSES
8458 +*/
8459 +#define N_REG_CLASSES (int)LIM_REG_CLASSES
8460 +
8461 +/*
8462 +An initializer containing the names of the register classes as C string
8463 +constants. These names are used in writing some of the debugging dumps.
8464 +*/
8465 +#define REG_CLASS_NAMES \
8466 +{ \
8467 + "NO_REGS", \
8468 + "GENERAL_REGS", \
8469 + "FLOATING_POINT_REGS", \
8470 + "ALL_REGS" \
8471 +}
8472 +
8473 +/*
8474 +An initializer containing the contents of the register classes, as integers
8475 +which are bit masks. The nth integer specifies the contents of class
8476 +n. The way the integer mask is interpreted is that
8477 +register r is in the class if mask & (1 << r) is 1.
8478 +
8479 +When the machine has more than 32 registers, an integer does not suffice.
8480 +Then the integers are replaced by sub-initializers, braced groupings containing
8481 +several integers. Each sub-initializer must be suitable as an initializer
8482 +for the type HARD_REG_SET which is defined in hard-reg-set.h.
8483 +In this situation, the first integer in each sub-initializer corresponds to
8484 +registers 0 through 31, the second integer to registers 32 through 63, and
8485 +so on.
8486 +*/
8487 +#define REG_CLASS_CONTENTS { \
8488 + {0x00000000}, /* NO_REGS */ \
8489 + {0x0000FFFF}, /* GENERAL_REGS */ \
8490 + {0xFFFF0000}, /* FP_REGS */ \
8491 + {0x7FFFFFFF}, /* ALL_REGS */ \
8492 +}
8493 +
8494 +
8495 +/*
8496 +A C expression whose value is a register class containing hard register
8497 +REGNO. In general there is more than one such class; choose a class
8498 +which is minimal, meaning that no smaller class also contains the
8499 +register.
8500 +*/
8501 +#define REGNO_REG_CLASS(REGNO) ((REGNO < 16) ? GENERAL_REGS : FP_REGS)
8502 +
8503 +/*
8504 +A macro whose definition is the name of the class to which a valid
8505 +base register must belong. A base register is one used in an address
8506 +which is the register value plus a displacement.
8507 +*/
8508 +#define BASE_REG_CLASS GENERAL_REGS
8509 +
8510 +/*
8511 +This is a variation of the BASE_REG_CLASS macro which allows
8512 +the selection of a base register in a mode-dependent manner. If
8513 +mode is VOIDmode then it should return the same value as
8514 +BASE_REG_CLASS.
8515 +*/
8516 +#define MODE_BASE_REG_CLASS(MODE) BASE_REG_CLASS
8517 +
8518 +/*
8519 +A macro whose definition is the name of the class to which a valid
8520 +index register must belong. An index register is one used in an
8521 +address where its value is either multiplied by a scale factor or
8522 +added to another register (as well as added to a displacement).
8523 +*/
8524 +#define INDEX_REG_CLASS BASE_REG_CLASS
8525 +
8526 +/*
8527 +A C expression which defines the machine-dependent operand constraint
8528 +letters for register classes. If CHAR is such a letter, the
8529 +value should be the register class corresponding to it. Otherwise,
8530 +the value should be NO_REGS. The register letter r,
8531 +corresponding to class GENERAL_REGS, will not be passed
8532 +to this macro; you do not need to handle it.
8533 +*/
8534 +#define REG_CLASS_FROM_LETTER(CHAR) ((CHAR) == 'f' ? FP_REGS : NO_REGS)
8535 +
8536 +
8537 +/* These assume that REGNO is a hard or pseudo reg number.
8538 + They give nonzero only if REGNO is a hard reg of the suitable class
8539 + or a pseudo reg currently allocated to a suitable hard reg.
8540 + Since they use reg_renumber, they are safe only once reg_renumber
8541 + has been allocated, which happens in local-alloc.c. */
8542 +#define TEST_REGNO(R, TEST, VALUE) \
8543 + ((R TEST VALUE) || ((unsigned) reg_renumber[R] TEST VALUE))
8544 +
8545 +/*
8546 +A C expression which is nonzero if register number num is suitable for use as a base
8547 +register in operand addresses. It may be either a suitable hard register or a pseudo
8548 +register that has been allocated such a hard register.
8549 +*/
8550 +#define REGNO_OK_FOR_BASE_P(NUM) TEST_REGNO(NUM, <=, LAST_REGNUM)
8551 +
8552 +/*
8553 +A C expression which is nonzero if register number NUM is
8554 +suitable for use as an index register in operand addresses. It may be
8555 +either a suitable hard register or a pseudo register that has been
8556 +allocated such a hard register.
8557 +
8558 +The difference between an index register and a base register is that
8559 +the index register may be scaled. If an address involves the sum of
8560 +two registers, neither one of them scaled, then either one may be
8561 +labeled the ``base'' and the other the ``index''; but whichever
8562 +labeling is used must fit the machine's constraints of which registers
8563 +may serve in each capacity. The compiler will try both labelings,
8564 +looking for one that is valid, and will reload one or both registers
8565 +only if neither labeling works.
8566 +*/
8567 +#define REGNO_OK_FOR_INDEX_P(NUM) TEST_REGNO(NUM, <=, LAST_REGNUM)
8568 +
8569 +/*
8570 +A C expression that places additional restrictions on the register class
8571 +to use when it is necessary to copy value X into a register in class
8572 +CLASS. The value is a register class; perhaps CLASS, or perhaps
8573 +another, smaller class. On many machines, the following definition is
8574 +safe: #define PREFERRED_RELOAD_CLASS(X,CLASS) CLASS
8575 +
8576 +Sometimes returning a more restrictive class makes better code. For
8577 +example, on the 68000, when X is an integer constant that is in range
8578 +for a 'moveq' instruction, the value of this macro is always
8579 +DATA_REGS as long as CLASS includes the data registers.
8580 +Requiring a data register guarantees that a 'moveq' will be used.
8581 +
8582 +If X is a const_double, by returning NO_REGS
8583 +you can force X into a memory constant. This is useful on
8584 +certain machines where immediate floating values cannot be loaded into
8585 +certain kinds of registers.
8586 +*/
8587 +#define PREFERRED_RELOAD_CLASS(X, CLASS) CLASS
8588 +
8589 +
8590 +
8591 +/*
8592 +A C expression for the maximum number of consecutive registers
8593 +of class CLASS needed to hold a value of mode MODE.
8594 +
8595 +This is closely related to the macro HARD_REGNO_NREGS. In fact,
8596 +the value of the macro CLASS_MAX_NREGS(CLASS, MODE)
8597 +should be the maximum value of HARD_REGNO_NREGS(REGNO, MODE)
8598 +for all REGNO values in the class CLASS.
8599 +
8600 +This macro helps control the handling of multiple-word values
8601 +in the reload pass.
8602 +*/
8603 +#define CLASS_MAX_NREGS(CLASS, MODE) /* ToDo:fixme */ \
8604 + (unsigned int)((GET_MODE_SIZE(MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
8605 +
8606 +
8607 +/*
8608 + Using CONST_OK_FOR_CONSTRAINT_P instead of CONST_OK_FOR_LETTER_P
8609 + in order to support constraints with more than one letter.
8610 + Only two letters are then used for constant constraints,
8611 + the letter 'K' and the letter 'I'. The constraint starting with
8612 + these letters must consist of four characters. The character following
8613 + 'K' or 'I' must be either 'u' (unsigned) or 's' (signed) to specify
8614 + if the constant is zero or sign extended. The last two characters specify
8615 + the length in bits of the constant. The base constraint letter 'I' means
8616 + that this is a negated constant, meaning that actually -VAL should be
8617 + checked to lie within the valid range instead of VAL, which is used when
8618 + 'K' is the base constraint letter.
8619 +
8620 +*/
8621 +
8622 +#define CONSTRAINT_LEN(C, STR) \
8623 + ( ((C) == 'K' || (C) == 'I') ? 4 : \
8624 + ((C) == 'R') ? 5 : \
8625 + ((C) == 'N' || (C) == 'O' || \
8626 + (C) == 'P' || (C) == 'L' || (C) == 'J') ? -1 : \
8627 + DEFAULT_CONSTRAINT_LEN((C), (STR)) )
8628 +
8629 +#define CONST_OK_FOR_CONSTRAINT_P(VALUE, C, STR) \
8630 + avr32_const_ok_for_constraint_p(VALUE, C, STR)
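
The comment above describes a four-character scheme for integer constraints: the base letter ('K' to test VAL itself, 'I' to test -VAL), then 'u' or 's' for zero- or sign-extended, then two digits giving the width in bits, so a constraint written as, say, "Ks21" would mean a signed 21-bit constant. A standalone sketch of that range check; the helper name and the example constraints are illustrative only, and the real test lives in avr32_const_ok_for_constraint_p:

/* Illustrative range check following the constraint encoding described
   above: "Kunn"/"Ksnn" test VAL against an unsigned/signed nn-bit range,
   and "Iunn"/"Isnn" apply the same test to -VAL. */
#include <stdio.h>

static int
const_ok_sketch (long long val, const char *constraint)
{
  int bits = (constraint[2] - '0') * 10 + (constraint[3] - '0');
  long long v = (constraint[0] == 'I') ? -val : val;

  if (constraint[1] == 'u')
    return v >= 0 && v < (1LL << bits);
  return v >= -(1LL << (bits - 1)) && v < (1LL << (bits - 1));
}

int main (void)
{
  printf ("%d\n", const_ok_sketch (1000000, "Ks21")); /* 1: fits in signed 21 bits */
  printf ("%d\n", const_ok_sketch (2000000, "Ks21")); /* 0: out of range */
  printf ("%d\n", const_ok_sketch (255, "Ku08"));     /* 1: fits in unsigned 8 bits */
  printf ("%d\n", const_ok_sketch (-21, "Is21"));     /* 1: -VAL = 21 is in range */
  return 0;
}
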
8631 +
8632 +/*
8633 +A C expression that defines the machine-dependent operand constraint
8634 +letters that specify particular ranges of const_double values ('G' or 'H').
8635 +
8636 +If C is one of those letters, the expression should check that
8637 +VALUE, an RTX of code const_double, is in the appropriate
8638 +range and return 1 if so, 0 otherwise. If C is not one of those
8639 +letters, the value should be 0 regardless of VALUE.
8640 +
8641 +const_double is used for all floating-point constants and for
8642 +DImode fixed-point constants. A given letter can accept either
8643 +or both kinds of values. It can use GET_MODE to distinguish
8644 +between these kinds.
8645 +*/
8646 +#define CONST_DOUBLE_OK_FOR_LETTER_P(OP, C) \
8647 + ((C) == 'G' ? avr32_const_double_immediate(OP) : 0)
8648 +
8649 +/*
8650 +A C expression that defines the optional machine-dependent constraint
8651 +letters that can be used to segregate specific types of operands, usually
8652 +memory references, for the target machine. Any letter that is not
8653 +elsewhere defined and not matched by REG_CLASS_FROM_LETTER
8654 +may be used. Normally this macro will not be defined.
8655 +
8656 +If it is required for a particular target machine, it should return 1
8657 +if VALUE corresponds to the operand type represented by the
8658 +constraint letter C. If C is not defined as an extra
8659 +constraint, the value returned should be 0 regardless of VALUE.
8660 +
8661 +For example, on the ROMP, load instructions cannot have their output
8662 +in r0 if the memory reference contains a symbolic address. Constraint
8663 +letter 'Q' is defined as representing a memory address that does
8664 +not contain a symbolic address. An alternative is specified with
8665 +a 'Q' constraint on the input and 'r' on the output. The next
8666 +alternative specifies 'm' on the input and a register class that
8667 +does not include r0 on the output.
8668 +*/
8669 +#define EXTRA_CONSTRAINT_STR(OP, C, STR) \
8670 + ((C) == 'W' ? avr32_address_operand(OP, GET_MODE(OP)) : \
8671 + (C) == 'R' ? (avr32_indirect_register_operand(OP, GET_MODE(OP)) || \
8672 + (avr32_imm_disp_memory_operand(OP, GET_MODE(OP)) \
8673 + && avr32_const_ok_for_constraint_p( \
8674 + INTVAL(XEXP(XEXP(OP, 0), 1)), \
8675 + (STR)[1], &(STR)[1]))) : \
8676 + (C) == 'S' ? avr32_indexed_memory_operand(OP, GET_MODE(OP)) : \
8677 + (C) == 'T' ? avr32_const_pool_ref_operand(OP, GET_MODE(OP)) : \
8678 + (C) == 'U' ? SYMBOL_REF_RCALL_FUNCTION_P(OP) : \
8679 + (C) == 'Z' ? avr32_cop_memory_operand(OP, GET_MODE(OP)) : \
8680 + 0)
8681 +
8682 +
8683 +#define EXTRA_MEMORY_CONSTRAINT(C, STR) ( ((C) == 'R') || \
8684 + ((C) == 'S') || \
8685 + ((C) == 'Z') )
8686 +
8687 +
8688 +/* Returns nonzero if op is a function SYMBOL_REF which
8689 + can be called using an rcall instruction */
8690 +#define SYMBOL_REF_RCALL_FUNCTION_P(op) \
8691 + ( GET_CODE(op) == SYMBOL_REF \
8692 + && SYMBOL_REF_FUNCTION_P(op) \
8693 + && SYMBOL_REF_LOCAL_P(op) \
8694 + && !SYMBOL_REF_EXTERNAL_P(op) \
8695 + && !TARGET_HAS_ASM_ADDR_PSEUDOS )
8696 +
8697 +/******************************************************************************
8698 + * Stack Layout and Calling Conventions
8699 + *****************************************************************************/
8700 +
8701 +/** Basic Stack Layout **/
8702 +
8703 +/*
8704 +Define this macro if pushing a word onto the stack moves the stack
8705 +pointer to a smaller address.
8706 +
8707 +When we say, ``define this macro if ...,'' it means that the
8708 +compiler checks this macro only with #ifdef so the precise
8709 +definition used does not matter.
8710 +*/
8711 +/* pushm decrements SP: *(--SP) <-- Rx */
8712 +#define STACK_GROWS_DOWNWARD
8713 +
8714 +/*
8715 +This macro defines the operation used when something is pushed
8716 +on the stack. In RTL, a push operation will be
8717 +(set (mem (STACK_PUSH_CODE (reg sp))) ...)
8718 +
8719 +The choices are PRE_DEC, POST_DEC, PRE_INC,
8720 +and POST_INC. Which of these is correct depends on
8721 +the stack direction and on whether the stack pointer points
8722 +to the last item on the stack or whether it points to the
8723 +space for the next item on the stack.
8724 +
8725 +The default is PRE_DEC when STACK_GROWS_DOWNWARD is
8726 +defined, which is almost always right, and PRE_INC otherwise,
8727 +which is often wrong.
8728 +*/
8729 +/* pushm: *(--SP) <-- Rx */
8730 +#define STACK_PUSH_CODE PRE_DEC
8731 +
8732 +/* Define this to nonzero if the nominal address of the stack frame
8733 + is at the high-address end of the local variables;
8734 + that is, each additional local variable allocated
8735 + goes at a more negative offset in the frame. */
8736 +#define FRAME_GROWS_DOWNWARD 1
8737 +
8738 +
8739 +/*
8740 +Offset from the frame pointer to the first local variable slot to be allocated.
8741 +
8742 +If FRAME_GROWS_DOWNWARD, find the next slot's offset by
8743 +subtracting the first slot's length from STARTING_FRAME_OFFSET.
8744 +Otherwise, it is found by adding the length of the first slot to the
8745 +value STARTING_FRAME_OFFSET.
8746 + (i'm not sure if the above is still correct.. had to change it to get
8747 + rid of an overfull. --mew 2feb93 )
8748 +*/
8749 +#define STARTING_FRAME_OFFSET 0
8750 +
8751 +/*
8752 +Offset from the stack pointer register to the first location at which
8753 +outgoing arguments are placed. If not specified, the default value of
8754 +zero is used. This is the proper value for most machines.
8755 +
8756 +If ARGS_GROW_DOWNWARD, this is the offset to the location above
8757 +the first location at which outgoing arguments are placed.
8758 +*/
8759 +#define STACK_POINTER_OFFSET 0
8760 +
8761 +/*
8762 +Offset from the argument pointer register to the first argument's
8763 +address. On some machines it may depend on the data type of the
8764 +function.
8765 +
8766 +If ARGS_GROW_DOWNWARD, this is the offset to the location above
8767 +the first argument's address.
8768 +*/
8769 +#define FIRST_PARM_OFFSET(FUNDECL) 0
8770 +
8771 +
8772 +/*
8773 +A C expression whose value is RTL representing the address in a stack
8774 +frame where the pointer to the caller's frame is stored. Assume that
8775 +FRAMEADDR is an RTL expression for the address of the stack frame
8776 +itself.
8777 +
8778 +If you don't define this macro, the default is to return the value
8779 +of FRAMEADDR - that is, the stack frame address is also the
8780 +address of the stack word that points to the previous frame.
8781 +*/
8782 +#define DYNAMIC_CHAIN_ADDRESS(FRAMEADDR) plus_constant ((FRAMEADDR), 4)
8783 +
8784 +
8785 +/*
8786 +A C expression whose value is RTL representing the value of the return
8787 +address for the frame COUNT steps up from the current frame, after
8788 +the prologue. FRAMEADDR is the frame pointer of the COUNT
8789 +frame, or the frame pointer of the COUNT - 1 frame if
8790 +RETURN_ADDR_IN_PREVIOUS_FRAME is defined.
8791 +
8792 +The value of the expression must always be the correct address when
8793 +COUNT is zero, but may be NULL_RTX if there is no way to
8794 +determine the return address of other frames.
8795 +*/
8796 +#define RETURN_ADDR_RTX(COUNT, FRAMEADDR) avr32_return_addr(COUNT, FRAMEADDR)
8797 +
8798 +
8799 +/*
8800 +A C expression whose value is RTL representing the location of the
8801 +incoming return address at the beginning of any function, before the
8802 +prologue. This RTL is either a REG, indicating that the return
8803 +value is saved in 'REG', or a MEM representing a location in
8804 +the stack.
8805 +
8806 +You only need to define this macro if you want to support call frame
8807 +debugging information like that provided by DWARF 2.
8808 +
8809 +If this RTL is a REG, you should also define
8810 +DWARF_FRAME_RETURN_COLUMN to DWARF_FRAME_REGNUM (REGNO).
8811 +*/
8812 +#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM)
8813 +
8814 +
8815 +
8816 +/*
8817 +A C expression whose value is an integer giving the offset, in bytes,
8818 +from the value of the stack pointer register to the top of the stack
8819 +frame at the beginning of any function, before the prologue. The top of
8820 +the frame is defined to be the value of the stack pointer in the
8821 +previous frame, just before the call instruction.
8822 +
8823 +You only need to define this macro if you want to support call frame
8824 +debugging information like that provided by DWARF 2.
8825 +*/
8826 +#define INCOMING_FRAME_SP_OFFSET 0
8827 +
8828 +
8829 +/** Exception Handling Support **/
8830 +
8831 +#define DWARF2_UNWIND_INFO 1
8832 +
8833 +/*
8834 +A C expression whose value is the Nth register number used for
8835 +data by exception handlers, or INVALID_REGNUM if fewer than
8836 +N registers are usable.
8837 +
8838 +The exception handling library routines communicate with the exception
8839 +handlers via a set of agreed upon registers. Ideally these registers
8840 +should be call-clobbered; it is possible to use call-saved registers,
8841 +but may negatively impact code size. The target must support at least
8842 +2 data registers, but should define 4 if there are enough free registers.
8843 +
8844 +You must define this macro if you want to support call frame exception
8845 +handling like that provided by DWARF 2.
8846 +*/
8847 +/*
8848 + Use r8-r11
8849 +*/
8850 +#define EH_RETURN_DATA_REGNO(N) \
8851 + ((N) < 4 ? INTERNAL_REGNUM((N) + 8U) : INVALID_REGNUM)
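For illustration, the sketch below exercises this mapping with INTERNAL_REGNUM stubbed as the identity function and INVALID_REGNUM given GCC's conventional all-ones value; the real definitions live elsewhere in the port:

#include <stdio.h>

/* Illustrative stubs only: INTERNAL_REGNUM and INVALID_REGNUM are defined
   elsewhere; an identity mapping is assumed here for demonstration. */
#define INTERNAL_REGNUM(X) (X)
#define INVALID_REGNUM (~(unsigned int) 0)

#define EH_RETURN_DATA_REGNO(N) \
  ((N) < 4 ? INTERNAL_REGNUM((N) + 8U) : INVALID_REGNUM)

int main(void)
{
  for (unsigned int n = 0; n < 6; n++) {
    unsigned int regno = EH_RETURN_DATA_REGNO(n);
    if (regno == INVALID_REGNUM)
      printf("N=%u -> INVALID_REGNUM\n", n);
    else
      printf("N=%u -> r%u\n", n, regno);   /* r8..r11 */
  }
  return 0;
}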
8852 +
8853 +/*
8854 +A C expression whose value is RTL representing a location in which
8855 +to store a stack adjustment to be applied before function return.
8856 +This is used to unwind the stack to an exception handler's call frame.
8857 +It will be assigned zero on code paths that return normally.
8858 +
8859 +Typically this is a call-clobbered hard register that is otherwise
8860 +untouched by the epilogue, but could also be a stack slot.
8861 +
8862 +You must define this macro if you want to support call frame exception
8863 +handling like that provided by DWARF 2.
8864 +*/
8865 +/*
8866 + I don't think functions that may throw exceptions can ever be leaf
8867 + functions, so we may safely use LR for this.
8868 +*/
8869 +#define EH_RETURN_STACKADJ_REGNO LR_REGNUM
8870 +#define EH_RETURN_STACKADJ_RTX gen_rtx_REG(SImode, EH_RETURN_STACKADJ_REGNO)
8871 +
8872 +/*
8873 +A C expression whose value is RTL representing a location in which
8874 +to store the address of an exception handler to which we should
8875 +return. It will not be assigned on code paths that return normally.
8876 +
8877 +Typically this is the location in the call frame at which the normal
8878 +return address is stored. For targets that return by popping an
8879 +address off the stack, this might be a memory address just below
8880 +the target call frame rather than inside the current call
8881 +frame. EH_RETURN_STACKADJ_RTX will have already been assigned,
8882 +so it may be used to calculate the location of the target call frame.
8883 +
8884 +Some targets have more complex requirements than storing to an
8885 +address calculable during initial code generation. In that case
8886 +the eh_return instruction pattern should be used instead.
8887 +
8888 +If you want to support call frame exception handling, you must
8889 +define either this macro or the eh_return instruction pattern.
8890 +*/
8891 +/*
8892 + We define the eh_return instruction pattern, so this isn't needed.
8893 +*/
8894 +/* #define EH_RETURN_HANDLER_RTX gen_rtx_REG(Pmode, RET_REGISTER) */
8895 +
8896 +/*
8897 + This macro chooses the encoding of pointers embedded in the
8898 + exception handling sections. If at all possible, this should be
8899 + defined such that the exception handling section will not require
8900 + dynamic relocations, and so may be read-only.
8901 +
8902 + code is 0 for data, 1 for code labels, 2 for function
8903 + pointers. global is true if the symbol may be affected by dynamic
8904 + relocations. The macro should return a combination of the DW_EH_PE_*
8905 + defines as found in dwarf2.h.
8906 +
8907 + If this macro is not defined, pointers will not be encoded but
8908 + represented directly.
8909 +*/
8910 +#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
8911 + ((flag_pic && (GLOBAL) ? DW_EH_PE_indirect : 0) \
8912 + | (flag_pic ? DW_EH_PE_pcrel : DW_EH_PE_absptr) \
8913 + | DW_EH_PE_sdata4)
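As a rough illustration of the resulting encoding bytes, the sketch below re-evaluates the same expression with the DW_EH_PE_* values conventionally found in dwarf2.h (assumed here, not taken from this patch):

#include <stdio.h>

/* DW_EH_PE_* values as conventionally defined in dwarf2.h (assumption). */
#define DW_EH_PE_absptr   0x00
#define DW_EH_PE_pcrel    0x10
#define DW_EH_PE_sdata4   0x0b
#define DW_EH_PE_indirect 0x80

static int eh_data_format (int flag_pic, int global)
{
  return (flag_pic && global ? DW_EH_PE_indirect : 0)
         | (flag_pic ? DW_EH_PE_pcrel : DW_EH_PE_absptr)
         | DW_EH_PE_sdata4;
}

int main (void)
{
  /* non-PIC: 0x0b (absptr|sdata4); PIC local: 0x1b; PIC global: 0x9b. */
  printf ("non-PIC:    0x%02x\n", eh_data_format (0, 0));
  printf ("PIC local:  0x%02x\n", eh_data_format (1, 0));
  printf ("PIC global: 0x%02x\n", eh_data_format (1, 1));
  return 0;
}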
8914 +
8915 +/* ToDo: The rest of this subsection */
8916 +
8917 +/** Specifying How Stack Checking is Done **/
8918 +/* ToDo: All in this subsection */
8919 +
8920 +/** Registers That Address the Stack Frame **/
8921 +
8922 +/*
8923 +The register number of the stack pointer register, which must also be a
8924 +fixed register according to FIXED_REGISTERS. On most machines,
8925 +the hardware determines which register this is.
8926 +*/
8927 +/* Using r13 as stack pointer. */
8928 +#define STACK_POINTER_REGNUM INTERNAL_REGNUM(13)
8929 +
8930 +/*
8931 +The register number of the frame pointer register, which is used to
8932 +access automatic variables in the stack frame. On some machines, the
8933 +hardware determines which register this is. On other machines, you can
8934 +choose any register you wish for this purpose.
8935 +*/
8936 +/* Use r7 */
8937 +#define FRAME_POINTER_REGNUM INTERNAL_REGNUM(7)
8938 +
8939 +
8940 +
8941 +/*
8942 +The register number of the arg pointer register, which is used to access
8943 +the function's argument list. On some machines, this is the same as the
8944 +frame pointer register. On some machines, the hardware determines which
8945 +register this is. On other machines, you can choose any register you
8946 +wish for this purpose. If this is not the same register as the frame
8947 +pointer register, then you must mark it as a fixed register according to
8948 +FIXED_REGISTERS, or arrange to be able to eliminate it (see Section
8949 +10.10.5 [Elimination], page 224).
8950 +*/
8951 +/* Using r4 */
8952 +#define ARG_POINTER_REGNUM INTERNAL_REGNUM(4)
8953 +
8954 +
8955 +/*
8956 +Register numbers used for passing a function's static chain pointer. If
8957 +register windows are used, the register number as seen by the called
8958 +function is STATIC_CHAIN_INCOMING_REGNUM, while the register
8959 +number as seen by the calling function is STATIC_CHAIN_REGNUM. If
8960 +these registers are the same, STATIC_CHAIN_INCOMING_REGNUM need
8961 +not be defined.
8962 +
8963 +The static chain register need not be a fixed register.
8964 +
8965 +If the static chain is passed in memory, these macros should not be
8966 +defined; instead, the next two macros should be defined.
8967 +*/
8968 +/* Using r0 */
8969 +#define STATIC_CHAIN_REGNUM INTERNAL_REGNUM(0)
8970 +
8971 +
8972 +/** Eliminating Frame Pointer and Arg Pointer **/
8973 +
8974 +/*
8975 +A C expression which is nonzero if a function must have and use a frame
8976 +pointer. This expression is evaluated in the reload pass. If its value is
8977 +nonzero the function will have a frame pointer.
8978 +
8979 +The expression can in principle examine the current function and decide
8980 +according to the facts, but on most machines the constant 0 or the
8981 +constant 1 suffices. Use 0 when the machine allows code to be generated
8982 +with no frame pointer, and doing so saves some time or space. Use 1
8983 +when there is no possible advantage to avoiding a frame pointer.
8984 +
8985 +In certain cases, the compiler does not know how to produce valid code
8986 +without a frame pointer. The compiler recognizes those cases and
8987 +automatically gives the function a frame pointer regardless of what
8988 +FRAME_POINTER_REQUIRED says. You don't need to worry about
8989 +them.
8990 +
8991 +In a function that does not require a frame pointer, the frame pointer
8992 +register can be allocated for ordinary usage, unless you mark it as a
8993 +fixed register. See FIXED_REGISTERS for more information.
8994 +*/
8995 +/* We need the frame pointer when compiling for profiling */
8996 +#define FRAME_POINTER_REQUIRED (current_function_profile)
8997 +
8998 +/*
8999 +A C statement to store in the variable DEPTH_VAR the difference
9000 +between the frame pointer and the stack pointer values immediately after
9001 +the function prologue. The value would be computed from information
9002 +such as the result of get_frame_size () and the tables of
9003 +registers regs_ever_live and call_used_regs.
9004 +
9005 +If ELIMINABLE_REGS is defined, this macro will not be used and
9006 +need not be defined. Otherwise, it must be defined even if
9007 +FRAME_POINTER_REQUIRED is defined to always be true; in that
9008 +case, you may set DEPTH_VAR to anything.
9009 +*/
9010 +#define INITIAL_FRAME_POINTER_OFFSET(DEPTH_VAR) ((DEPTH_VAR) = get_frame_size())
9011 +
9012 +/*
9013 +If defined, this macro specifies a table of register pairs used to
9014 +eliminate unneeded registers that point into the stack frame. If it is not
9015 +defined, the only elimination attempted by the compiler is to replace
9016 +references to the frame pointer with references to the stack pointer.
9017 +
9018 +The definition of this macro is a list of structure initializations, each
9019 +of which specifies an original and replacement register.
9020 +
9021 +On some machines, the position of the argument pointer is not known until
9022 +the compilation is completed. In such a case, a separate hard register
9023 +must be used for the argument pointer. This register can be eliminated by
9024 +replacing it with either the frame pointer or the argument pointer,
9025 +depending on whether or not the frame pointer has been eliminated.
9026 +
9027 +In this case, you might specify:
9028 + #define ELIMINABLE_REGS \
9029 + {{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
9030 + {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
9031 + {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
9032 +
9033 +Note that the elimination of the argument pointer with the stack pointer is
9034 +specified first since that is the preferred elimination.
9035 +*/
9036 +#define ELIMINABLE_REGS \
9037 +{ \
9038 + { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
9039 + { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
9040 + { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM } \
9041 +}
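A stand-alone sketch of how such an elimination table is shaped, with placeholder register numbers standing in for the INTERNAL_REGNUM() values used above:

#include <stdio.h>

/* Placeholder register numbers for illustration only. */
enum { SP_REG = 13, FP_REG = 7, AP_REG = 4 };

struct elim_pair { int from, to; };

/* Mirrors the shape of ELIMINABLE_REGS: each entry names a register that
   reload may try to replace and its preferred substitute. */
static const struct elim_pair eliminable[] = {
  { FP_REG, SP_REG },
  { AP_REG, SP_REG },
  { AP_REG, FP_REG },
};

int main (void)
{
  for (unsigned i = 0; i < sizeof eliminable / sizeof eliminable[0]; i++)
    printf ("may replace r%d with r%d\n",
            eliminable[i].from, eliminable[i].to);
  return 0;
}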
9042 +
9043 +/*
9044 +A C expression that returns nonzero if the compiler is allowed to try
9045 +to replace register number FROM with register number
9046 +TO. This macro need only be defined if ELIMINABLE_REGS
9047 +is defined, and will usually be the constant 1, since most of the cases
9048 +preventing register elimination are things that the compiler already
9049 +knows about.
9050 +*/
9051 +#define CAN_ELIMINATE(FROM, TO) 1
9052 +
9053 +/*
9054 +This macro is similar to INITIAL_FRAME_POINTER_OFFSET. It
9055 +specifies the initial difference between the specified pair of
9056 +registers. This macro must be defined if ELIMINABLE_REGS is
9057 +defined.
9058 +*/
9059 +#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
9060 + ((OFFSET) = avr32_initial_elimination_offset(FROM, TO))
9061 +
9062 +/** Passing Function Arguments on the Stack **/
9063 +
9064 +
9065 +/*
9066 +A C expression. If nonzero, push insns will be used to pass
9067 +outgoing arguments.
9068 +If the target machine does not have a push instruction, set it to zero.
9069 +That directs GCC to use an alternate strategy: to
9070 +allocate the entire argument block and then store the arguments into
9071 +it. When PUSH_ARGS is nonzero, PUSH_ROUNDING must be defined too.
9072 +*/
9073 +#define PUSH_ARGS 1
9074 +
9075 +
9076 +/*
9077 +A C expression that is the number of bytes actually pushed onto the
9078 +stack when an instruction attempts to push NPUSHED bytes.
9079 +
9080 +On some machines, the definition
9081 +
9082 + #define PUSH_ROUNDING(BYTES) (BYTES)
9083 +
9084 +will suffice. But on other machines, instructions that appear
9085 +to push one byte actually push two bytes in an attempt to maintain
9086 +alignment. Then the definition should be
9087 +
9088 + #define PUSH_ROUNDING(BYTES) (((BYTES) + 1) & ~1)
9089 +*/
9090 +/* Push 4 bytes at a time. */
9091 +#define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3)
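A minimal stand-alone check of the rounding expression, showing that every push size is rounded up to the next multiple of 4 bytes:

#include <stdio.h>

#define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3)

int main (void)
{
  for (int n = 1; n <= 9; n++)
    printf ("push %d byte(s) -> %d bytes on the stack\n",
            n, PUSH_ROUNDING (n));
  return 0;
}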
9092 +
9093 +/*
9094 +A C expression. If nonzero, the maximum amount of space required for
9095 +outgoing arguments will be computed and placed into the variable
9096 +current_function_outgoing_args_size. No space will be pushed
9097 +onto the stack for each call; instead, the function prologue should
9098 +increase the stack frame size by this amount.
9099 +
9100 +Setting both PUSH_ARGS and ACCUMULATE_OUTGOING_ARGS is not proper.
9101 +*/
9102 +#define ACCUMULATE_OUTGOING_ARGS 0
9103 +
9104 +
9105 +
9106 +
9107 +/*
9108 +A C expression that should indicate the number of bytes of its own
9109 +arguments that a function pops on returning, or 0 if the
9110 +function pops no arguments and the caller must therefore pop them all
9111 +after the function returns.
9112 +
9113 +FUNDECL is a C variable whose value is a tree node that describes
9114 +the function in question. Normally it is a node of type
9115 +FUNCTION_DECL that describes the declaration of the function.
9116 +From this you can obtain the DECL_ATTRIBUTES of the function.
9117 +
9118 +FUNTYPE is a C variable whose value is a tree node that
9119 +describes the function in question. Normally it is a node of type
9120 +FUNCTION_TYPE that describes the data type of the function.
9121 +From this it is possible to obtain the data types of the value and
9122 +arguments (if known).
9123 +
9124 +When a call to a library function is being considered, FUNDECL
9125 +will contain an identifier node for the library function. Thus, if
9126 +you need to distinguish among various library functions, you can do so
9127 +by their names. Note that ``library function'' in this context means
9128 +a function used to perform arithmetic, whose name is known specially
9129 +in the compiler and was not mentioned in the C code being compiled.
9130 +
9131 +STACK_SIZE is the number of bytes of arguments passed on the
9132 +stack. If a variable number of bytes is passed, it is zero, and
9133 +argument popping will always be the responsibility of the calling function.
9134 +
9135 +On the VAX, all functions always pop their arguments, so the definition
9136 +of this macro is STACK_SIZE. On the 68000, using the standard
9137 +calling convention, no functions pop their arguments, so the value of
9138 +the macro is always 0 in this case. But an alternative calling
9139 +convention is available in which functions that take a fixed number of
9140 +arguments pop them but other functions (such as printf) pop
9141 +nothing (the caller pops all). When this convention is in use,
9142 +FUNTYPE is examined to determine whether a function takes a fixed
9143 +number of arguments.
9144 +*/
9145 +#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, STACK_SIZE) 0
9146 +
9147 +
9148 +/* Return true if we can use a single return instruction for this function */
9149 +#define USE_RETURN_INSN(ISCOND) avr32_use_return_insn(ISCOND)
9150 +
9151 +/*
9152 +A C expression that should indicate the number of bytes a call sequence
9153 +pops off the stack. It is added to the value of RETURN_POPS_ARGS
9154 +when compiling a function call.
9155 +
9156 +CUM is the variable in which all arguments to the called function
9157 +have been accumulated.
9158 +
9159 +On certain architectures, such as the SH5, a call trampoline is used
9160 +that pops certain registers off the stack, depending on the arguments
9161 +that have been passed to the function. Since this is a property of the
9162 +call site, not of the called function, RETURN_POPS_ARGS is not
9163 +appropriate.
9164 +*/
9165 +#define CALL_POPS_ARGS(CUM) 0
9166 +
9167 +/* Passing Arguments in Registers */
9168 +
9169 +/*
9170 +A C expression that controls whether a function argument is passed
9171 +in a register, and which register.
9172 +
9173 +The arguments are CUM, which summarizes all the previous
9174 +arguments; MODE, the machine mode of the argument; TYPE,
9175 +the data type of the argument as a tree node or 0 if that is not known
9176 +(which happens for C support library functions); and NAMED,
9177 +which is 1 for an ordinary argument and 0 for nameless arguments that
9178 +correspond to '...' in the called function's prototype.
9179 +TYPE can be an incomplete type if a syntax error has previously
9180 +occurred.
9181 +
9182 +The value of the expression is usually either a reg RTX for the
9183 +hard register in which to pass the argument, or zero to pass the
9184 +argument on the stack.
9185 +
9186 +For machines like the VAX and 68000, where normally all arguments are
9187 +pushed, zero suffices as a definition.
9188 +
9189 +The value of the expression can also be a parallel RTX. This is
9190 +used when an argument is passed in multiple locations. The mode of
9191 +the parallel should be the mode of the entire argument. The
9192 +parallel holds any number of expr_list pairs; each one
9193 +describes where part of the argument is passed. In each
9194 +expr_list the first operand must be a reg RTX for the hard
9195 +register in which to pass this part of the argument, and the mode of the
9196 +register RTX indicates how large this part of the argument is. The
9197 +second operand of the expr_list is a const_int which gives
9198 +the offset in bytes into the entire argument of where this part starts.
9199 +As a special exception the first expr_list in the parallel
9200 +RTX may have a first operand of zero. This indicates that the entire
9201 +argument is also stored on the stack.
9202 +
9203 +The last time this macro is called, it is called with MODE == VOIDmode,
9204 +and its result is passed to the call or call_value
9205 +pattern as operands 2 and 3 respectively.
9206 +
9207 +The usual way to make the ISO library 'stdarg.h' work on a machine
9208 +where some arguments are usually passed in registers, is to cause
9209 +nameless arguments to be passed on the stack instead. This is done
9210 +by making FUNCTION_ARG return 0 whenever NAMED is 0.
9211 +
9212 +You may use the macro MUST_PASS_IN_STACK (MODE, TYPE)
9213 +in the definition of this macro to determine if this argument is of a
9214 +type that must be passed in the stack. If REG_PARM_STACK_SPACE
9215 +is not defined and FUNCTION_ARG returns nonzero for such an
9216 +argument, the compiler will abort. If REG_PARM_STACK_SPACE is
9217 +defined, the argument will be computed in the stack and then loaded into
9218 +a register. */
9219 +
9220 +#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
9221 + avr32_function_arg(&(CUM), MODE, TYPE, NAMED)
9222 +
9223 +
9224 +
9225 +
9226 +/*
9227 +A C type for declaring a variable that is used as the first argument of
9228 +FUNCTION_ARG and other related values. For some target machines,
9229 +the type int suffices and can hold the number of bytes of
9230 +argument so far.
9231 +
9232 +There is no need to record in CUMULATIVE_ARGS anything about the
9233 +arguments that have been passed on the stack. The compiler has other
9234 +variables to keep track of that. For target machines on which all
9235 +arguments are passed on the stack, there is no need to store anything in
9236 +CUMULATIVE_ARGS; however, the data structure must exist and
9237 +should not be empty, so use int.
9238 +*/
9239 +typedef struct avr32_args
9240 +{
9241 + /* Index representing the argument register the current function argument
9242 + will occupy */
9243 + int index;
9244 + /* A mask with bits representing the argument registers: if a bit is set
9245 + then this register is used for an argument */
9246 + int used_index;
9247 + /* TRUE if this function has anonymous arguments */
9248 + int uses_anonymous_args;
9249 + /* The size in bytes of the named arguments pushed on the stack */
9250 + int stack_pushed_args_size;
9251 + /* Set to true if this function needs a Return Value Pointer */
9252 + int use_rvp;
9253 +
9254 +} CUMULATIVE_ARGS;
9255 +
9256 +
9257 +#define FIRST_CUM_REG_INDEX 0
9258 +#define LAST_CUM_REG_INDEX 4
9259 +#define GET_REG_INDEX(CUM) ((CUM)->index)
9260 +#define SET_REG_INDEX(CUM, INDEX) ((CUM)->index = (INDEX));
9261 +#define GET_USED_INDEX(CUM, INDEX) ((CUM)->used_index & (1 << (INDEX)))
9262 +#define SET_USED_INDEX(CUM, INDEX) \
9263 + do \
9264 + { \
9265 + if (INDEX >= 0) \
9266 + (CUM)->used_index |= (1 << (INDEX)); \
9267 + } \
9268 + while (0)
9269 +#define SET_INDEXES_UNUSED(CUM) ((CUM)->used_index = 0)
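The used_index bookkeeping above is a plain bitmask over the argument-register indices. A self-contained sketch that mirrors those macros on a local struct (illustration only):

#include <stdio.h>

struct cum { int index; int used_index; };

#define SET_INDEXES_UNUSED(CUM)  ((CUM)->used_index = 0)
#define SET_USED_INDEX(CUM, IDX) \
  do { if ((IDX) >= 0) (CUM)->used_index |= (1 << (IDX)); } while (0)
#define GET_USED_INDEX(CUM, IDX) ((CUM)->used_index & (1 << (IDX)))

int main (void)
{
  struct cum c;
  SET_INDEXES_UNUSED (&c);
  SET_USED_INDEX (&c, 0);   /* first argument register taken  */
  SET_USED_INDEX (&c, 2);   /* third argument register taken  */

  for (int i = 0; i < 5; i++)
    printf ("arg reg index %d: %s\n", i,
            GET_USED_INDEX (&c, i) ? "used" : "free");
  return 0;
}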
9270 +
9271 +
9272 +/*
9273 + A C statement (sans semicolon) for initializing the variable cum for the
9274 + state at the beginning of the argument list. The variable has type
9275 + CUMULATIVE_ARGS. The value of FNTYPE is the tree node for the data type of
9276 + the function which will receive the args, or 0 if the args are to a compiler
9277 + support library function. For direct calls that are not libcalls, FNDECL
9278 + contains the declaration node of the function. FNDECL is also set when
9279 + INIT_CUMULATIVE_ARGS is used to find arguments for the function being
9280 + compiled. N_NAMED_ARGS is set to the number of named arguments, including a
9281 + structure return address if it is passed as a parameter, when making a call.
9282 + When processing incoming arguments, N_NAMED_ARGS is set to -1.
9283 +
9284 + When processing a call to a compiler support library function, LIBNAME
9285 + identifies which one. It is a symbol_ref rtx which contains the name of the
9286 + function, as a string. LIBNAME is 0 when an ordinary C function call is
9287 + being processed. Thus, each time this macro is called, either LIBNAME or
9288 + FNTYPE is nonzero, but never both of them at once.
9289 +*/
9290 +#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
9291 + avr32_init_cumulative_args(&(CUM), FNTYPE, LIBNAME, FNDECL)
9292 +
9293 +
9294 +/*
9295 +A C statement (sans semicolon) to update the summarizer variable
9296 +CUM to advance past an argument in the argument list. The
9297 +values MODE, TYPE and NAMED describe that argument.
9298 +Once this is done, the variable CUM is suitable for analyzing
9299 +the following argument with FUNCTION_ARG, etc.
9300 +
9301 +This macro need not do anything if the argument in question was passed
9302 +on the stack. The compiler knows how to track the amount of stack space
9303 +used for arguments without any special help.
9304 +*/
9305 +#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
9306 + avr32_function_arg_advance(&(CUM), MODE, TYPE, NAMED)
9307 +
9308 +/*
9309 +If defined, a C expression which determines whether, and in which direction,
9310 +to pad out an argument with extra space. The value should be of type
9311 +enum direction: either 'upward' to pad above the argument,
9312 +'downward' to pad below, or 'none' to inhibit padding.
9313 +
9314 +The amount of padding is always just enough to reach the next
9315 +multiple of FUNCTION_ARG_BOUNDARY; this macro does not control
9316 +it.
9317 +
9318 +This macro has a default definition which is right for most systems.
9319 +For little-endian machines, the default is to pad upward. For
9320 +big-endian machines, the default is to pad downward for an argument of
9321 +constant size shorter than an int, and upward otherwise.
9322 +*/
9323 +#define FUNCTION_ARG_PADDING(MODE, TYPE) \
9324 + avr32_function_arg_padding(MODE, TYPE)
9325 +
9326 +/*
9327 + Specify padding for the last element of a block move between registers
9328 + and memory. First is nonzero if this is the only element. Defining
9329 + this macro allows better control of register function parameters on
9330 + big-endian machines, without using PARALLEL rtl. In particular,
9331 + MUST_PASS_IN_STACK need not test padding and mode of types in registers,
9332 + as there is no longer a "wrong" part of a register; For example, a three
9333 + byte aggregate may be passed in the high part of a register if so required.
9334 +*/
9335 +#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
9336 + avr32_function_arg_padding(MODE, TYPE)
9337 +
9338 +/*
9339 +If defined, a C expression which determines whether the default
9340 +implementation of va_arg will attempt to pad down before reading the
9341 +next argument, if that argument is smaller than its aligned space as
9342 +controlled by PARM_BOUNDARY. If this macro is not defined, all such
9343 +arguments are padded down if BYTES_BIG_ENDIAN is true.
9344 +*/
9345 +#define PAD_VARARGS_DOWN \
9346 + (FUNCTION_ARG_PADDING (TYPE_MODE (type), type) == downward)
9347 +
9348 +
9349 +/*
9350 +A C expression that is nonzero if REGNO is the number of a hard
9351 +register in which function arguments are sometimes passed. This does
9352 +not include implicit arguments such as the static chain and
9353 +the structure-value address. On many machines, no registers can be
9354 +used for this purpose since all function arguments are pushed on the
9355 +stack.
9356 +*/
9357 +/*
9358 + Use r8 - r12 for function arguments.
9359 +*/
9360 +#define FUNCTION_ARG_REGNO_P(REGNO) \
9361 + (REGNO >= 3 && REGNO <= 7)
9362 +
9363 +/* Number of registers used for passing function arguments */
9364 +#define NUM_ARG_REGS 5
9365 +
9366 +/*
9367 +If defined, the order in which arguments are loaded into their
9368 +respective argument registers is reversed so that the last
9369 +argument is loaded first. This macro only affects arguments
9370 +passed in registers.
9371 +*/
9372 +/* #define LOAD_ARGS_REVERSED */
9373 +
9374 +/** How Scalar Function Values Are Returned **/
9375 +
9376 +/* AVR32 is using r12 as return register. */
9377 +#define RET_REGISTER (15 - 12)
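Taken together with the r8 - r12 comment above, the 3..7 range accepted by FUNCTION_ARG_REGNO_P and RET_REGISTER = 15 - 12 suggest that these registers are numbered internally as 15 minus the architectural number (so r12 is internal 3 and r8 is internal 7). A purely illustrative sketch of that assumed mapping:

#include <stdio.h>

/* Assumed mapping inferred from RET_REGISTER and FUNCTION_ARG_REGNO_P;
   the macro name here is made up for illustration. */
#define ASSUMED_INTERNAL_REGNUM(ARCH) (15 - (ARCH))

#define FUNCTION_ARG_REGNO_P(REGNO) ((REGNO) >= 3 && (REGNO) <= 7)

int main (void)
{
  for (int arch = 8; arch <= 12; arch++) {
    int internal = ASSUMED_INTERNAL_REGNUM (arch);
    printf ("r%-2d -> internal %d, arg reg: %s\n",
            arch, internal,
            FUNCTION_ARG_REGNO_P (internal) ? "yes" : "no");
  }
  printf ("return register: internal %d (r12)\n", 15 - 12);
  return 0;
}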
9378 +
9379 +/*
9380 +Define this macro if -traditional should not cause functions
9381 +declared to return float to convert the value to double.
9382 +*/
9383 +/* #define TRADITIONAL_RETURN_FLOAT */
9384 +
9385 +/*
9386 +A C expression to create an RTX representing the place where a
9387 +function returns a value of data type VALTYPE. VALTYPE is
9388 +a tree node representing a data type. Write TYPE_MODE(VALTYPE)
9389 +to get the machine mode used to represent that type.
9390 +On many machines, only the mode is relevant. (Actually, on most
9391 +machines, scalar values are returned in the same place regardless of
9392 +mode).
9393 +
9394 +The value of the expression is usually a reg RTX for the hard
9395 +register where the return value is stored. The value can also be a
9396 +parallel RTX, if the return value is in multiple places. See
9397 +FUNCTION_ARG for an explanation of the parallel form.
9398 +
9399 +If PROMOTE_FUNCTION_RETURN is defined, you must apply the same
9400 +promotion rules specified in PROMOTE_MODE if VALTYPE is a
9401 +scalar type.
9402 +
9403 +If the precise function being called is known, FUNC is a tree
9404 +node (FUNCTION_DECL) for it; otherwise, FUNC is a null
9405 +pointer. This makes it possible to use a different value-returning
9406 +convention for specific functions when all their calls are
9407 +known.
9408 +
9409 +FUNCTION_VALUE is not used for return values with aggregate data
9410 +types, because these are returned in another way. See
9411 +STRUCT_VALUE_REGNUM and related macros, below.
9412 +*/
9413 +#define FUNCTION_VALUE(VALTYPE, FUNC) avr32_function_value(VALTYPE, FUNC)
9414 +
9415 +
9416 +/*
9417 +A C expression to create an RTX representing the place where a library
9418 +function returns a value of mode MODE. If the precise function
9419 +being called is known, FUNC is a tree node
9420 +(FUNCTION_DECL) for it; otherwise, func is a null
9421 +pointer. This makes it possible to use a different value-returning
9422 +convention for specific functions when all their calls are
9423 +known.
9424 +
9425 +Note that "library function" in this context means a compiler
9426 +support routine, used to perform arithmetic, whose name is known
9427 +specially by the compiler and was not mentioned in the C code being
9428 +compiled.
9429 +
9430 +The definition of LIBRARY_VALUE need not be concerned with aggregate
9431 +data types, because none of the library functions returns such types.
9432 +*/
9433 +#define LIBCALL_VALUE(MODE) avr32_libcall_value(MODE)
9434 +
9435 +/*
9436 +A C expression that is nonzero if REGNO is the number of a hard
9437 +register in which the values of called function may come back.
9438 +
9439 +A register whose use for returning values is limited to serving as the
9440 +second of a pair (for a value of type double, say) need not be
9441 +recognized by this macro. So for most machines, this definition
9442 +suffices:
9443 + #define FUNCTION_VALUE_REGNO_P(N) ((N) == 0)
9444 +
9445 +If the machine has register windows, so that the caller and the called
9446 +function use different registers for the return value, this macro
9447 +should recognize only the caller's register numbers.
9448 +*/
9449 +/*
9450 + When returning a value of mode DImode, r11:r10 is used, else r12 is used.
9451 +*/
9452 +#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == RET_REGISTER \
9453 + || (REGNO) == INTERNAL_REGNUM(11))
9454 +
9455 +
9456 +/** How Large Values Are Returned **/
9457 +
9458 +
9459 +/*
9460 +Define this macro to be 1 if all structure and union return values must be
9461 +in memory. Since this results in slower code, this should be defined
9462 +only if needed for compatibility with other compilers or with an ABI.
9463 +If you define this macro to be 0, then the conventions used for structure
9464 +and union return values are decided by the RETURN_IN_MEMORY macro.
9465 +
9466 +If not defined, this defaults to the value 1.
9467 +*/
9468 +#define DEFAULT_PCC_STRUCT_RETURN 0
9469 +
9470 +
9471 +
9472 +
9473 +/** Generating Code for Profiling **/
9474 +
9475 +/*
9476 +A C statement or compound statement to output to FILE some
9477 +assembler code to call the profiling subroutine mcount.
9478 +
9479 +The details of how mcount expects to be called are determined by
9480 +your operating system environment, not by GCC. To figure them out,
9481 +compile a small program for profiling using the system's installed C
9482 +compiler and look at the assembler code that results.
9483 +
9484 +Older implementations of mcount expect the address of a counter
9485 +variable to be loaded into some register. The name of this variable is
9486 +'LP' followed by the number LABELNO, so you would generate
9487 +the name using 'LP%d' in a fprintf.
9488 +*/
9489 +/* ToDo: fixme */
9490 +#ifndef FUNCTION_PROFILER
9491 +#define FUNCTION_PROFILER(FILE, LABELNO) \
9492 + fprintf((FILE), "/* profiler %d */", (LABELNO))
9493 +#endif
9494 +
9495 +
9496 +/*****************************************************************************
9497 + * Trampolines for Nested Functions *
9498 + *****************************************************************************/
9499 +
9500 +/*
9501 +A C statement to output, on the stream FILE, assembler code for a
9502 +block of data that contains the constant parts of a trampoline. This
9503 +code should not include a label - the label is taken care of
9504 +automatically.
9505 +
9506 +If you do not define this macro, it means no template is needed
9507 +for the target. Do not define this macro on systems where the block move
9508 +code to copy the trampoline into place would be larger than the code
9509 +to generate it on the spot.
9510 +*/
9511 +/* ToDo: correct? */
9512 +#define TRAMPOLINE_TEMPLATE(FILE) avr32_trampoline_template(FILE);
9513 +
9514 +
9515 +/*
9516 +A C expression for the size in bytes of the trampoline, as an integer.
9517 +*/
9518 +/* ToDo: fixme */
9519 +#define TRAMPOLINE_SIZE 0x0C
9520 +
9521 +/*
9522 +Alignment required for trampolines, in bits.
9523 +
9524 +If you don't define this macro, the value of BIGGEST_ALIGNMENT
9525 +is used for aligning trampolines.
9526 +*/
9527 +#define TRAMPOLINE_ALIGNMENT 16
9528 +
9529 +/*
9530 +A C statement to initialize the variable parts of a trampoline.
9531 +ADDR is an RTX for the address of the trampoline; FNADDR is
9532 +an RTX for the address of the nested function; STATIC_CHAIN is an
9533 +RTX for the static chain value that should be passed to the function
9534 +when it is called.
9535 +*/
9536 +#define INITIALIZE_TRAMPOLINE(ADDR, FNADDR, STATIC_CHAIN) \
9537 + avr32_initialize_trampoline(ADDR, FNADDR, STATIC_CHAIN)
9538 +
9539 +
9540 +/******************************************************************************
9541 + * Implicit Calls to Library Routines
9542 + *****************************************************************************/
9543 +
9544 +/* Tail calling. */
9545 +
9546 +/* A C expression that evaluates to true if it is ok to perform a sibling
9547 + call to DECL. */
9548 +#define FUNCTION_OK_FOR_SIBCALL(DECL) 0
9549 +
9550 +#define OVERRIDE_OPTIONS avr32_override_options ()
9551 +
9552 +
9553 +
9554 +/******************************************************************************
9555 + * Addressing Modes
9556 + *****************************************************************************/
9557 +
9558 +/*
9559 +A C expression that is nonzero if the machine supports pre-increment,
9560 +pre-decrement, post-increment, or post-decrement addressing respectively.
9561 +*/
9562 +/*
9563 + AVR32 supports Rp++ and --Rp
9564 +*/
9565 +#define HAVE_PRE_INCREMENT 0
9566 +#define HAVE_PRE_DECREMENT 1
9567 +#define HAVE_POST_INCREMENT 1
9568 +#define HAVE_POST_DECREMENT 0
9569 +
9570 +/*
9571 +A C expression that is nonzero if the machine supports pre- or
9572 +post-address side-effect generation involving constants other than
9573 +the size of the memory operand.
9574 +*/
9575 +#define HAVE_PRE_MODIFY_DISP 0
9576 +#define HAVE_POST_MODIFY_DISP 0
9577 +
9578 +/*
9579 +A C expression that is nonzero if the machine supports pre- or
9580 +post-address side-effect generation involving a register displacement.
9581 +*/
9582 +#define HAVE_PRE_MODIFY_REG 0
9583 +#define HAVE_POST_MODIFY_REG 0
9584 +
9585 +/*
9586 +A C expression that is 1 if the RTX X is a constant which
9587 +is a valid address. On most machines, this can be defined as
9588 +CONSTANT_P (X), but a few machines are more restrictive
9589 +in which constant addresses are supported.
9590 +
9591 +CONSTANT_P accepts integer-values expressions whose values are
9592 +not explicitly known, such as symbol_ref, label_ref, and
9593 +high expressions and const arithmetic expressions, in
9594 +addition to const_int and const_double expressions.
9595 +*/
9596 +#define CONSTANT_ADDRESS_P(X) CONSTANT_P(X)
9597 +
9598 +/*
9599 +A number, the maximum number of registers that can appear in a valid
9600 +memory address. Note that it is up to you to specify a value equal to
9601 +the maximum number that GO_IF_LEGITIMATE_ADDRESS would ever
9602 +accept.
9603 +*/
9604 +#define MAX_REGS_PER_ADDRESS 2
9605 +
9606 +/*
9607 +A C compound statement with a conditional goto LABEL;
9608 +executed if X (an RTX) is a legitimate memory address on the
9609 +target machine for a memory operand of mode MODE.
9610 +
9611 +It usually pays to define several simpler macros to serve as
9612 +subroutines for this one. Otherwise it may be too complicated to
9613 +understand.
9614 +
9615 +This macro must exist in two variants: a strict variant and a
9616 +non-strict one. The strict variant is used in the reload pass. It
9617 +must be defined so that any pseudo-register that has not been
9618 +allocated a hard register is considered a memory reference. In
9619 +contexts where some kind of register is required, a pseudo-register
9620 +with no hard register must be rejected.
9621 +
9622 +The non-strict variant is used in other passes. It must be defined to
9623 +accept all pseudo-registers in every context where some kind of
9624 +register is required.
9625 +
9626 +Compiler source files that want to use the strict variant of this
9627 +macro define the macro REG_OK_STRICT. You should use an
9628 +#ifdef REG_OK_STRICT conditional to define the strict variant
9629 +in that case and the non-strict variant otherwise.
9630 +
9631 +Subroutines to check for acceptable registers for various purposes (one
9632 +for base registers, one for index registers, and so on) are typically
9633 +among the subroutines used to define GO_IF_LEGITIMATE_ADDRESS.
9634 +Then only these subroutine macros need have two variants; the higher
9635 +levels of macros may be the same whether strict or not.
9636 +
9637 +Normally, constant addresses which are the sum of a symbol_ref
9638 +and an integer are stored inside a const RTX to mark them as
9639 +constant. Therefore, there is no need to recognize such sums
9640 +specifically as legitimate addresses. Normally you would simply
9641 +recognize any const as legitimate.
9642 +
9643 +Usually PRINT_OPERAND_ADDRESS is not prepared to handle constant
9644 +sums that are not marked with const. It assumes that a naked
9645 +plus indicates indexing. If so, then you must reject such
9646 +naked constant sums as illegitimate addresses, so that none of them will
9647 +be given to PRINT_OPERAND_ADDRESS.
9648 +
9649 +On some machines, whether a symbolic address is legitimate depends on
9650 +the section that the address refers to. On these machines, define the
9651 +macro ENCODE_SECTION_INFO to store the information into the
9652 +symbol_ref, and then check for it here. When you see a
9653 +const, you will have to look inside it to find the
9654 +symbol_ref in order to determine the section.
9655 +
9656 +The best way to modify the name string is by adding text to the
9657 +beginning, with suitable punctuation to prevent any ambiguity. Allocate
9658 +the new name in saveable_obstack. You will have to modify
9659 +ASM_OUTPUT_LABELREF to remove and decode the added text and
9660 +output the name accordingly, and define STRIP_NAME_ENCODING to
9661 +access the original name string.
9662 +
9663 +You can check the information stored here into the symbol_ref in
9664 +the definitions of the macros GO_IF_LEGITIMATE_ADDRESS and
9665 +PRINT_OPERAND_ADDRESS.
9666 +*/
9667 +#ifdef REG_OK_STRICT
9668 +# define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
9669 + do \
9670 + { \
9671 + if (avr32_legitimate_address(MODE, X, 1)) \
9672 + goto LABEL; \
9673 + } \
9674 + while (0)
9675 +#else
9676 +# define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
9677 + do \
9678 + { \
9679 + if (avr32_legitimate_address(MODE, X, 0)) \
9680 + goto LABEL; \
9681 + } \
9682 + while (0)
9683 +#endif
9684 +
9685 +/*
9686 +A C expression that is nonzero if X (assumed to be a reg
9687 +RTX) is valid for use as a base register. For hard registers, it
9688 +should always accept those which the hardware permits and reject the
9689 +others. Whether the macro accepts or rejects pseudo registers must be
9690 +controlled by REG_OK_STRICT as described above. This usually
9691 +requires two variant definitions, of which REG_OK_STRICT
9692 +controls the one actually used.
9693 +*/
9694 +#ifdef REG_OK_STRICT
9695 +# define REG_OK_FOR_BASE_P(X) \
9696 + REGNO_OK_FOR_BASE_P(REGNO(X))
9697 +#else
9698 +# define REG_OK_FOR_BASE_P(X) \
9699 + ((REGNO(X) <= LAST_REGNUM) || (REGNO(X) >= FIRST_PSEUDO_REGISTER))
9700 +#endif
9701 +
9702 +
9703 +/*
9704 +A C expression that is nonzero if X (assumed to be a reg
9705 +RTX) is valid for use as an index register.
9706 +
9707 +The difference between an index register and a base register is that
9708 +the index register may be scaled. If an address involves the sum of
9709 +two registers, neither one of them scaled, then either one may be
9710 +labeled the "base" and the other the "index"; but whichever
9711 +labeling is used must fit the machine's constraints of which registers
9712 +may serve in each capacity. The compiler will try both labelings,
9713 +looking for one that is valid, and will reload one or both registers
9714 +only if neither labeling works.
9715 +*/
9716 +#define REG_OK_FOR_INDEX_P(X) \
9717 + REG_OK_FOR_BASE_P(X)
9718 +
9719 +
9720 +/*
9721 +A C compound statement that attempts to replace X with a valid
9722 +memory address for an operand of mode MODE. win will be a
9723 +C statement label elsewhere in the code; the macro definition may use
9724 +
9725 + GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN);
9726 +
9727 +to avoid further processing if the address has become legitimate.
9728 +
9729 +X will always be the result of a call to break_out_memory_refs,
9730 +and OLDX will be the operand that was given to that function to produce
9731 +X.
9732 +
9733 +The code generated by this macro should not alter the substructure of
9734 +X. If it transforms X into a more legitimate form, it
9735 +should assign X (which will always be a C variable) a new value.
9736 +
9737 +It is not necessary for this macro to come up with a legitimate
9738 +address. The compiler has standard ways of doing so in all cases. In
9739 +fact, it is safe for this macro to do nothing. But often a
9740 +machine-dependent strategy can generate better code.
9741 +*/
9742 +#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
9743 + do \
9744 + { \
9745 + if (GET_CODE(X) == PLUS \
9746 + && GET_CODE(XEXP(X, 0)) == REG \
9747 + && GET_CODE(XEXP(X, 1)) == CONST_INT \
9748 + && !CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(X, 1)), \
9749 + 'K', "Ks16")) \
9750 + { \
9751 + rtx index = force_reg(SImode, XEXP(X, 1)); \
9752 + X = gen_rtx_PLUS( SImode, XEXP(X, 0), index); \
9753 + } \
9754 + GO_IF_LEGITIMATE_ADDRESS(MODE, X, WIN); \
9755 + } \
9756 + while(0)
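The constraint check above rejects displacements that do not fit 'Ks16'; assuming, as the name suggests, that this denotes a signed 16-bit immediate, the range test looks like the stand-alone sketch below (the helper name is made up for illustration):

#include <stdio.h>

/* Assumption: 'Ks16' accepts a signed 16-bit displacement. */
static int fits_ks16 (long value)
{
  return value >= -32768 && value <= 32767;
}

int main (void)
{
  long disps[] = { 0, 4, 32767, 32768, -32768, -40000 };
  for (unsigned i = 0; i < sizeof disps / sizeof disps[0]; i++)
    printf ("disp %6ld: %s\n", disps[i],
            fits_ks16 (disps[i])
              ? "keep as reg+const address"
              : "force constant into an index register");
  return 0;
}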
9757 +
9758 +
9759 +/*
9760 +A C statement or compound statement with a conditional
9761 +goto LABEL; executed if memory address X (an RTX) can have
9762 +different meanings depending on the machine mode of the memory
9763 +reference it is used for or if the address is valid for some modes
9764 +but not others.
9765 +
9766 +Autoincrement and autodecrement addresses typically have mode-dependent
9767 +effects because the amount of the increment or decrement is the size
9768 +of the operand being addressed. Some machines have other mode-dependent
9769 +addresses. Many RISC machines have no mode-dependent addresses.
9770 +
9771 +You may assume that ADDR is a valid address for the machine.
9772 +*/
9773 +#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \
9774 + do \
9775 + { \
9776 + if (GET_CODE (ADDR) == POST_INC \
9777 + || GET_CODE (ADDR) == PRE_DEC) \
9778 + goto LABEL; \
9779 + } \
9780 + while (0)
9781 +
9782 +/*
9783 +A C expression that is nonzero if X is a legitimate constant for
9784 +an immediate operand on the target machine. You can assume that
9785 +X satisfies CONSTANT_P, so you need not check this. In fact,
9786 +'1' is a suitable definition for this macro on machines where
9787 +anything CONSTANT_P is valid.
9788 +*/
9789 +#define LEGITIMATE_CONSTANT_P(X) avr32_legitimate_constant_p(X)
9790 +
9791 +
9792 +/******************************************************************************
9793 + * Condition Code Status
9794 + *****************************************************************************/
9795 +
9796 +#define HAVE_conditional_move 1
9797 +
9798 +/*
9799 +C code for a data type which is used for declaring the mdep
9800 +component of cc_status. It defaults to int.
9801 +
9802 +This macro is not used on machines that do not use cc0.
9803 +*/
9804 +
9805 +typedef struct
9806 +{
9807 + int flags;
9808 + rtx value;
9809 + int fpflags;
9810 + rtx fpvalue;
9811 +} avr32_status_reg;
9812 +
9813 +
9814 +#define CC_STATUS_MDEP avr32_status_reg
9815 +
9816 +/*
9817 +A C expression to initialize the mdep field to "empty".
9818 +The default definition does nothing, since most machines don't use
9819 +the field anyway. If you want to use the field, you should probably
9820 +define this macro to initialize it.
9821 +
9822 +This macro is not used on machines that do not use cc0.
9823 +*/
9824 +
9825 +#define CC_STATUS_MDEP_INIT \
9826 + (cc_status.mdep.flags = CC_NONE , cc_status.mdep.value = 0)
9827 +
9828 +#define FPCC_STATUS_INIT \
9829 + (cc_status.mdep.fpflags = CC_NONE , cc_status.mdep.fpvalue = 0)
9830 +
9831 +/*
9832 +A C compound statement to set the components of cc_status
9833 +appropriately for an insn INSN whose body is EXP. It is
9834 +this macro's responsibility to recognize insns that set the condition
9835 +code as a byproduct of other activity as well as those that explicitly
9836 +set (cc0).
9837 +
9838 +This macro is not used on machines that do not use cc0.
9839 +
9840 +If there are insns that do not set the condition code but do alter
9841 +other machine registers, this macro must check to see whether they
9842 +invalidate the expressions that the condition code is recorded as
9843 +reflecting. For example, on the 68000, insns that store in address
9844 +registers do not set the condition code, which means that usually
9845 +NOTICE_UPDATE_CC can leave cc_status unaltered for such
9846 +insns. But suppose that the previous insn set the condition code
9847 +based on location 'a4@(102)' and the current insn stores a new
9848 +value in 'a4'. Although the condition code is not changed by
9849 +this, it will no longer be true that it reflects the contents of
9850 +'a4@(102)'. Therefore, NOTICE_UPDATE_CC must alter
9851 +cc_status in this case to say that nothing is known about the
9852 +condition code value.
9853 +
9854 +The definition of NOTICE_UPDATE_CC must be prepared to deal
9855 +with the results of peephole optimization: insns whose patterns are
9856 +parallel RTXs containing various reg, mem or
9857 +constants which are just the operands. The RTL structure of these
9858 +insns is not sufficient to indicate what the insns actually do. What
9859 +NOTICE_UPDATE_CC should do when it sees one is just to run
9860 +CC_STATUS_INIT.
9861 +
9862 +A possible definition of NOTICE_UPDATE_CC is to call a function
9863 +that looks at an attribute (see Insn Attributes) named, for example,
9864 +'cc'. This avoids having detailed information about patterns in
9865 +two places, the 'md' file and in NOTICE_UPDATE_CC.
9866 +*/
9867 +
9868 +#define NOTICE_UPDATE_CC(EXP, INSN) avr32_notice_update_cc(EXP, INSN)
9869 +
9870 +
9871 +
9872 +
9873 +/******************************************************************************
9874 + * Describing Relative Costs of Operations
9875 + *****************************************************************************/
9876 +
9877 +
9878 +
9879 +/*
9880 +A C expression for the cost of moving data of mode MODE from a
9881 +register in class FROM to one in class TO. The classes are
9882 +expressed using the enumeration values such as GENERAL_REGS. A
9883 +value of 2 is the default; other values are interpreted relative to
9884 +that.
9885 +
9886 +It is not required that the cost always equal 2 when FROM is the
9887 +same as TO; on some machines it is expensive to move between
9888 +registers if they are not general registers.
9889 +
9890 +If reload sees an insn consisting of a single set between two
9891 +hard registers, and if REGISTER_MOVE_COST applied to their
9892 +classes returns a value of 2, reload does not check to ensure that the
9893 +constraints of the insn are met. Setting a cost of other than 2 will
9894 +allow reload to verify that the constraints are met. You should do this
9895 +if the movm pattern's constraints do not allow such copying.
9896 +*/
9897 +#define REGISTER_MOVE_COST(MODE, FROM, TO) \
9898 + ((GET_MODE_SIZE(MODE) <= 4) ? 2: \
9899 + (GET_MODE_SIZE(MODE) <= 8) ? 3: \
9900 + 4)
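A stand-alone restatement of the size-tiered cost above, with GET_MODE_SIZE replaced by an explicit byte count for illustration:

#include <stdio.h>

static int register_move_cost (int mode_size_bytes)
{
  return (mode_size_bytes <= 4) ? 2
       : (mode_size_bytes <= 8) ? 3
       : 4;
}

int main (void)
{
  int sizes[] = { 1, 2, 4, 8, 16 };
  for (unsigned i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
    printf ("%2d-byte mode: cost %d\n", sizes[i],
            register_move_cost (sizes[i]));
  return 0;
}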
9901 +
9902 +/*
9903 +A C expression for the cost of moving data of mode MODE between a
9904 +register of class CLASS and memory; IN is zero if the value
9905 +is to be written to memory, nonzero if it is to be read in. This cost
9906 +is relative to those in REGISTER_MOVE_COST. If moving between
9907 +registers and memory is more expensive than between two registers, you
9908 +should define this macro to express the relative cost.
9909 +
9910 +If you do not define this macro, GCC uses a default cost of 4 plus
9911 +the cost of copying via a secondary reload register, if one is
9912 +needed. If your machine requires a secondary reload register to copy
9913 +between memory and a register of CLASS but the reload mechanism is
9914 +more complex than copying via an intermediate, define this macro to
9915 +reflect the actual cost of the move.
9916 +
9917 +GCC defines the function memory_move_secondary_cost if
9918 +secondary reloads are needed. It computes the costs due to copying via
9919 +a secondary register. If your machine copies from memory using a
9920 +secondary register in the conventional way but the default base value of
9921 +4 is not correct for your machine, define this macro to add some other
9922 +value to the result of that function. The arguments to that function
9923 +are the same as to this macro.
9924 +*/
9925 +/*
9926 + Memory moves are costly
9927 +*/
9928 +#define MEMORY_MOVE_COST(MODE, CLASS, IN) 10
9929 +/*
9930 + (((IN) ? ((GET_MODE_SIZE(MODE) < 4) ? 4 : \
9931 + (GET_MODE_SIZE(MODE) > 8) ? 6 : \
9932 + 3) \
9933 + : ((GET_MODE_SIZE(MODE) > 8) ? 4 : 2)))
9934 +*/
9935 +
9936 +/*
9937 +A C expression for the cost of a branch instruction. A value of 1 is
9938 +the default; other values are interpreted relative to that.
9939 +*/
9940 + /* Try to use conditionals as much as possible */
9941 +#define BRANCH_COST (TARGET_BRANCH_PRED ? 3 : 5)
9942 +
9943 +/*A C expression for the maximum number of instructions to execute via conditional
9944 + execution instructions instead of a branch. A value of BRANCH_COST+1 is the default
9945 + if the machine does not use cc0, and 1 if it does use cc0.*/
9946 +#define MAX_CONDITIONAL_EXECUTE 3
9947 +
9948 +/*
9949 +Define this macro as a C expression which is nonzero if accessing less
9950 +than a word of memory (i.e.: a char or a short) is no
9951 +faster than accessing a word of memory, i.e., if such access
9952 +require more than one instruction or if there is no difference in cost
9953 +between byte and (aligned) word loads.
9954 +
9955 +When this macro is not defined, the compiler will access a field by
9956 +finding the smallest containing object; when it is defined, a fullword
9957 +load will be used if alignment permits. Unless byte accesses are
9958 +faster than word accesses, using word accesses is preferable since it
9959 +may eliminate subsequent memory access if subsequent accesses occur to
9960 +other fields in the same word of the structure, but to different bytes.
9961 +*/
9962 +#define SLOW_BYTE_ACCESS 1
9963 +
9964 +
9965 +/*
9966 +Define this macro if it is as good or better to call a constant
9967 +function address than to call an address kept in a register.
9968 +*/
9969 +#define NO_FUNCTION_CSE
9970 +
9971 +
9972 +/******************************************************************************
9973 + * Adjusting the Instruction Scheduler
9974 + *****************************************************************************/
9975 +
9976 +/*****************************************************************************
9977 + * Dividing the Output into Sections (Texts, Data, ...) *
9978 + *****************************************************************************/
9979 +
9980 +/*
9981 +A C expression whose value is a string, including spacing, containing the
9982 +assembler operation that should precede instructions and read-only data.
9983 +Normally "\t.text" is right.
9984 +*/
9985 +#define TEXT_SECTION_ASM_OP "\t.text"
9986 +/*
9987 +A C statement that switches to the default section containing instructions.
9988 +Normally this is not needed, as simply defining TEXT_SECTION_ASM_OP
9989 +is enough. The MIPS port uses this to sort all functions after all data
9990 +declarations.
9991 +*/
9992 +/* #define TEXT_SECTION */
9993 +
9994 +/*
9995 +A C expression whose value is a string, including spacing, containing the
9996 +assembler operation to identify the following data as writable initialized
9997 +data. Normally "\t.data" is right.
9998 +*/
9999 +#define DATA_SECTION_ASM_OP "\t.data"
10000 +
10001 +/*
10002 +If defined, a C expression whose value is a string, including spacing,
10003 +containing the assembler operation to identify the following data as
10004 +shared data. If not defined, DATA_SECTION_ASM_OP will be used.
10005 +*/
10006 +
10007 +/*
10008 +A C expression whose value is a string, including spacing, containing
10009 +the assembler operation to identify the following data as read-only
10010 +initialized data.
10011 +*/
10012 +#undef READONLY_DATA_SECTION_ASM_OP
10013 +#define READONLY_DATA_SECTION_ASM_OP \
10014 + ((TARGET_USE_RODATA_SECTION) ? \
10015 + "\t.section\t.rodata" : \
10016 + TEXT_SECTION_ASM_OP )
10017 +
10018 +
10019 +/*
10020 +If defined, a C expression whose value is a string, including spacing,
10021 +containing the assembler operation to identify the following data as
10022 +uninitialized global data. If not defined, and neither
10023 +ASM_OUTPUT_BSS nor ASM_OUTPUT_ALIGNED_BSS are defined,
10024 +uninitialized global data will be output in the data section if
10025 +-fno-common is passed, otherwise ASM_OUTPUT_COMMON will be
10026 +used.
10027 +*/
10028 +#define BSS_SECTION_ASM_OP "\t.section\t.bss"
10029 +
10030 +/*
10031 +If defined, a C expression whose value is a string, including spacing,
10032 +containing the assembler operation to identify the following data as
10033 +uninitialized global shared data. If not defined, and
10034 +BSS_SECTION_ASM_OP is, the latter will be used.
10035 +*/
10036 +/*#define SHARED_BSS_SECTION_ASM_OP "\trseg\tshared_bbs_section:data:noroot(0)\n"*/
10037 +/*
10038 +If defined, a C expression whose value is a string, including spacing,
10039 +containing the assembler operation to identify the following data as
10040 +initialization code. If not defined, GCC will assume such a section does
10041 +not exist.
10042 +*/
10043 +#undef INIT_SECTION_ASM_OP
10044 +#define INIT_SECTION_ASM_OP "\t.section\t.init"
10045 +
10046 +/*
10047 +If defined, a C expression whose value is a string, including spacing,
10048 +containing the assembler operation to identify the following data as
10049 +finalization code. If not defined, GCC will assume such a section does
10050 +not exist.
10051 +*/
10052 +#undef FINI_SECTION_ASM_OP
10053 +#define FINI_SECTION_ASM_OP "\t.section\t.fini"
10054 +
10055 +/*
10056 +If defined, an ASM statement that switches to a different section
10057 +via SECTION_OP, calls FUNCTION, and switches back to
10058 +the text section. This is used in crtstuff.c if
10059 +INIT_SECTION_ASM_OP or FINI_SECTION_ASM_OP is defined, to emit calls
10060 +to initialization and finalization functions from the init and fini
10061 +sections. By default, this macro uses a simple function call. Some
10062 +ports need hand-crafted assembly code to avoid dependencies on
10063 +registers initialized in the function prologue or to ensure that
10064 +constant pools don't end up too far away in the text section.
10065 +*/
10066 +#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
10067 + asm ( SECTION_OP "\n" \
10068 + "mcall r6[" USER_LABEL_PREFIX #FUNC "@got]\n" \
10069 + TEXT_SECTION_ASM_OP);
10070 +
10071 +
10072 +/*
10073 +Define this macro to be an expression with a nonzero value if jump
10074 +tables (for tablejump insns) should be output in the text
10075 +section, along with the assembler instructions. Otherwise, the
10076 +readonly data section is used.
10077 +
10078 +This macro is irrelevant if there is no separate readonly data section.
10079 +*/
10080 +#define JUMP_TABLES_IN_TEXT_SECTION 1
10081 +
10082 +
10083 +/******************************************************************************
10084 + * Position Independent Code (PIC)
10085 + *****************************************************************************/
10086 +
10087 +#ifndef AVR32_ALWAYS_PIC
10088 +#define AVR32_ALWAYS_PIC 0
10089 +#endif
10090 +
10091 +/* GOT is set to r6 */
10092 +#define PIC_OFFSET_TABLE_REGNUM INTERNAL_REGNUM(6)
10093 +
10094 +/*
10095 +A C expression that is nonzero if X is a legitimate immediate
10096 +operand on the target machine when generating position independent code.
10097 +You can assume that X satisfies CONSTANT_P, so you need not
10098 +check this. You can also assume flag_pic is true, so you need not
10099 +check it either. You need not define this macro if all constants
10100 +(including SYMBOL_REF) can be immediate operands when generating
10101 +position independent code.
10102 +*/
10103 +/* We can't directly access anything that contains a symbol,
10104 + nor can we indirect via the constant pool. */
10105 +#define LEGITIMATE_PIC_OPERAND_P(X) avr32_legitimate_pic_operand_p(X)
10106 +
10107 +
10108 +/* We need to know when we are making a constant pool; this determines
10109 + whether data needs to be in the GOT or can be referenced via a GOT
10110 + offset. */
10111 +extern int making_const_table;
10112 +
10113 +/******************************************************************************
10114 + * Defining the Output Assembler Language
10115 + *****************************************************************************/
10116 +
10117 +
10118 +/*
10119 +A C string constant describing how to begin a comment in the target
10120 +assembler language. The compiler assumes that the comment will end at
10121 +the end of the line.
10122 +*/
10123 +#define ASM_COMMENT_START "# "
10124 +
10125 +/*
10126 +A C string constant for text to be output before each asm
10127 +statement or group of consecutive ones. Normally this is
10128 +"#APP", which is a comment that has no effect on most
10129 +assemblers but tells the GNU assembler that it must check the lines
10130 +that follow for all valid assembler constructs.
10131 +*/
10132 +#undef ASM_APP_ON
10133 +#define ASM_APP_ON "#APP\n"
10134 +
10135 +/*
10136 +A C string constant for text to be output after each asm
10137 +statement or group of consecutive ones. Normally this is
10138 +"#NO_APP", which tells the GNU assembler to resume making the
10139 +time-saving assumptions that are valid for ordinary compiler output.
10140 +*/
10141 +#undef ASM_APP_OFF
10142 +#define ASM_APP_OFF "#NO_APP\n"
10143 +
10144 +
10145 +
10146 +#define FILE_ASM_OP "\t.file\n"
10147 +#define IDENT_ASM_OP "\t.ident\t"
10148 +#define SET_ASM_OP "\t.set\t"
10149 +
10150 +
10151 +/*
10152 + * Output assembly directives to switch to section name. The section
10153 + * should have attributes as specified by flags, which is a bit mask
10154 + * of the SECTION_* flags defined in 'output.h'. If align is nonzero,
10155 + * it contains an alignment in bytes to be used for the section,
10156 + * otherwise some target default should be used. Only targets that
10157 + * must specify an alignment within the section directive need pay
10158 + * attention to align -- we will still use ASM_OUTPUT_ALIGN.
10159 + *
10160 + * NOTE: This one must not be moved to avr32.c
10161 + */
10162 +#undef TARGET_ASM_NAMED_SECTION
10163 +#define TARGET_ASM_NAMED_SECTION default_elf_asm_named_section
10164 +
10165 +
10166 +/*
10167 +You may define this macro as a C expression. You should define the
10168 +expression to have a nonzero value if GCC should output the constant
10169 +pool for a function before the code for the function, or a zero value if
10170 +GCC should output the constant pool after the function. If you do
10171 +not define this macro, the usual case, GCC will output the constant
10172 +pool before the function.
10173 +*/
10174 +#define CONSTANT_POOL_BEFORE_FUNCTION 0
10175 +
10176 +
10177 +/*
10178 +Define this macro as a C expression which is nonzero if the constant
10179 +EXP, of type tree, should be output after the code for a
10180 +function. The compiler will normally output all constants before the
10181 +function; you need not define this macro if this is OK.
10182 +*/
10183 +#define CONSTANT_AFTER_FUNCTION_P(EXP) 1
10184 +
10185 +
10186 +/*
10187 +Define this macro as a C expression which is nonzero if C is
10188 +used as a logical line separator by the assembler.
10189 +
10190 +If you do not define this macro, the default is that only
10191 +the character ';' is treated as a logical line separator.
10192 +*/
10193 +#define IS_ASM_LOGICAL_LINE_SEPARATOR(C) ((C) == '\n')
10194 +
10195 +
10196 +/** Output of Uninitialized Variables **/
10197 +
10198 +/*
10199 +A C statement (sans semicolon) to output to the stdio stream
10200 +STREAM the assembler definition of a common-label named
10201 +NAME whose size is SIZE bytes. The variable ROUNDED
10202 +is the size rounded up to whatever alignment the caller wants.
10203 +
10204 +Use the expression assemble_name(STREAM, NAME) to
10205 +output the name itself; before and after that, output the additional
10206 +assembler syntax for defining the name, and a newline.
10207 +
10208 +This macro controls how the assembler definitions of uninitialized
10209 +common global variables are output.
10210 +*/
10211 +/*
10212 +#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
10213 + avr32_asm_output_common(STREAM, NAME, SIZE, ROUNDED)
10214 +*/
10215 +
10216 +#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
10217 + do \
10218 + { \
10219 + fputs ("\t.comm ", (FILE)); \
10220 + assemble_name ((FILE), (NAME)); \
10221 + fprintf ((FILE), ",%d\n", (SIZE)); \
10222 + } \
10223 + while (0)
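+/* For illustration: with this definition a common symbol such as a global
+ 'char buf[64];' is emitted as
+ .comm buf,64
+ Note that ROUNDED is ignored and no alignment argument is printed. */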
10224 +
10225 +/*
10226 + * Like ASM_OUTPUT_BSS except takes the required alignment as a
10227 + * separate, explicit argument. If you define this macro, it is used
10228 + * in place of ASM_OUTPUT_BSS, and gives you more flexibility in
10229 + * handling the required alignment of the variable. The alignment is
10230 + * specified as the number of bits.
10231 + *
10232 + * Try to use function asm_output_aligned_bss defined in file varasm.c
10233 + * when defining this macro.
10234 + */
10235 +#define ASM_OUTPUT_ALIGNED_BSS(STREAM, DECL, NAME, SIZE, ALIGNMENT) \
10236 + asm_output_aligned_bss (STREAM, DECL, NAME, SIZE, ALIGNMENT)
10237 +
10238 +/*
10239 +A C statement (sans semicolon) to output to the stdio stream
10240 +STREAM the assembler definition of a local-common-label named
10241 +NAME whose size is SIZE bytes. The variable ROUNDED
10242 +is the size rounded up to whatever alignment the caller wants.
10243 +
10244 +Use the expression assemble_name(STREAM, NAME) to
10245 +output the name itself; before and after that, output the additional
10246 +assembler syntax for defining the name, and a newline.
10247 +
10248 +This macro controls how the assembler definitions of uninitialized
10249 +static variables are output.
10250 +*/
10251 +#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
10252 + do \
10253 + { \
10254 + fputs ("\t.lcomm ", (FILE)); \
10255 + assemble_name ((FILE), (NAME)); \
10256 + fprintf ((FILE), ",%d, %d\n", (SIZE), 2); \
10257 + } \
10258 + while (0)
10259 +
10260 +
10261 +/*
10262 +A C statement (sans semicolon) to output to the stdio stream
10263 +STREAM the assembler definition of a label named NAME.
10264 +Use the expression assemble_name(STREAM, NAME) to
10265 +output the name itself; before and after that, output the additional
10266 +assembler syntax for defining the name, and a newline.
10267 +*/
10268 +#define ASM_OUTPUT_LABEL(STREAM, NAME) avr32_asm_output_label(STREAM, NAME)
10269 +
10270 +/* A C string containing the appropriate assembler directive to
10271 + * specify the size of a symbol, without any arguments. On systems
10272 + * that use ELF, the default (in 'config/elfos.h') is '"\t.size\t"';
10273 + * on other systems, the default is not to define this macro.
10274 + *
10275 + * Define this macro only if it is correct to use the default
10276 + * definitions of ASM_OUTPUT_SIZE_DIRECTIVE and
10277 + * ASM_OUTPUT_MEASURED_SIZE for your system. If you need your own
10278 + * custom definitions of those macros, or if you do not need explicit
10279 + * symbol sizes at all, do not define this macro.
10280 + */
10281 +#define SIZE_ASM_OP "\t.size\t"
10282 +
10283 +
10284 +/*
10285 +A C statement (sans semicolon) to output to the stdio stream
10286 +STREAM some commands that will make the label NAME global;
10287 +that is, available for reference from other files. Use the expression
10288 +assemble_name(STREAM, NAME) to output the name
10289 +itself; before and after that, output the additional assembler syntax
10290 +for making that name global, and a newline.
10291 +*/
10292 +#define GLOBAL_ASM_OP "\t.globl\t"
10293 +
10294 +
10295 +
10296 +/*
10297 +A C expression which evaluates to true if the target supports weak symbols.
10298 +
10299 +If you don't define this macro, defaults.h provides a default
10300 +definition. If either ASM_WEAKEN_LABEL or ASM_WEAKEN_DECL
10301 +is defined, the default definition is '1'; otherwise, it is
10302 +'0'. Define this macro if you want to control weak symbol support
10303 +with a compiler flag such as -melf.
10304 +*/
10305 +#define SUPPORTS_WEAK 1
10306 +
10307 +/*
10308 +A C statement (sans semicolon) to output to the stdio stream
10309 +STREAM a reference in assembler syntax to a label named
10310 +NAME. This should add '_' to the front of the name, if that
10311 +is customary on your operating system, as it is in most Berkeley Unix
10312 +systems. This macro is used in assemble_name.
10313 +*/
10314 +#define ASM_OUTPUT_LABELREF(STREAM, NAME) \
10315 + avr32_asm_output_labelref(STREAM, NAME)
10316 +
10317 +
10318 +
10319 +/*
10320 +A C expression to assign to OUTVAR (which is a variable of type
10321 +char *) a newly allocated string made from the string
10322 +NAME and the number NUMBER, with some suitable punctuation
10323 +added. Use alloca to get space for the string.
10324 +
10325 +The string will be used as an argument to ASM_OUTPUT_LABELREF to
10326 +produce an assembler label for an internal static variable whose name is
10327 +NAME. Therefore, the string must be such as to result in valid
10328 +assembler code. The argument NUMBER is different each time this
10329 +macro is executed; it prevents conflicts between similarly-named
10330 +internal static variables in different scopes.
10331 +
10332 +Ideally this string should not be a valid C identifier, to prevent any
10333 +conflict with the user's own symbols. Most assemblers allow periods
10334 +or percent signs in assembler symbols; putting at least one of these
10335 +between the name and the number will suffice.
10336 +*/
10337 +#define ASM_FORMAT_PRIVATE_NAME(OUTVAR, NAME, NUMBER) \
10338 + do \
10339 + { \
10340 + (OUTVAR) = (char *) alloca (strlen ((NAME)) + 10); \
10341 + sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)); \
10342 + } \
10343 + while (0)
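+/* For illustration: with NAME "foo" and NUMBER 3 the macro above produces the
+ string "foo.3"; the '.' keeps the generated label from colliding with any
+ valid C identifier in user code. */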
10344 +
10345 +
10346 +/** Macros Controlling Initialization Routines **/
10347 +
10348 +
10349 +/*
10350 +If defined, main will not call __main as described above.
10351 +This macro should be defined for systems that control start-up code
10352 +on a symbol-by-symbol basis, such as OSF/1, and should not
10353 +be defined explicitly for systems that support INIT_SECTION_ASM_OP.
10354 +*/
10355 +/*
10356 + __main is not defined when debugging.
10357 +*/
10358 +#define HAS_INIT_SECTION
10359 +
10360 +
10361 +/** Output of Assembler Instructions **/
10362 +
10363 +/*
10364 +A C initializer containing the assembler's names for the machine
10365 +registers, each one as a C string constant. This is what translates
10366 +register numbers in the compiler into assembler language.
10367 +*/
10368 +
10369 +#define REGISTER_NAMES \
10370 +{ \
10371 + "pc", "lr", \
10372 + "sp", "r12", \
10373 + "r11", "r10", \
10374 + "r9", "r8", \
10375 + "r7", "r6", \
10376 + "r5", "r4", \
10377 + "r3", "r2", \
10378 + "r1", "r0", \
10379 + "f15","f14", \
10380 + "f13","f12", \
10381 + "f11","f10", \
10382 + "f9", "f8", \
10383 + "f7", "f6", \
10384 + "f5", "f4", \
10385 + "f3", "f2", \
10386 + "f1", "f0" \
10387 +}
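+/* Summary note (derived from this table and the register constants further
+ down in avr32.md): the port numbers registers in reverse, so regno 0 is
+ "pc", 1 is "lr", 2 is "sp", 3 is "r12" (the return value register), 8 is
+ "r7" (the frame pointer) and 15 is "r0"; the floating-point registers
+ f15..f0 occupy regnos 16..31. */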
10388 +
10389 +/*
10390 +A C compound statement to output to stdio stream STREAM the
10391 +assembler syntax for an instruction operand X. X is an
10392 +RTL expression.
10393 +
10394 +CODE is a value that can be used to specify one of several ways
10395 +of printing the operand. It is used when identical operands must be
10396 +printed differently depending on the context. CODE comes from
10397 +the '%' specification that was used to request printing of the
10398 +operand. If the specification was just '%digit' then
10399 +CODE is 0; if the specification was '%ltr digit'
10400 +then CODE is the ASCII code for ltr.
10401 +
10402 +If X is a register, this macro should print the register's name.
10403 +The names can be found in an array reg_names whose type is
10404 +char *[]. reg_names is initialized from REGISTER_NAMES.
10405 +
10406 +When the machine description has a specification '%punct'
10407 +(a '%' followed by a punctuation character), this macro is called
10408 +with a null pointer for X and the punctuation character for
10409 +CODE.
10410 +*/
10411 +#define PRINT_OPERAND(STREAM, X, CODE) avr32_print_operand(STREAM, X, CODE)
10412 +
10413 +/* A C statement to be executed just prior to the output of
10414 + assembler code for INSN, to modify the extracted operands so
10415 + they will be output differently.
10416 +
10417 + Here the argument OPVEC is the vector containing the operands
10418 + extracted from INSN, and NOPERANDS is the number of elements of
10419 + the vector which contain meaningful data for this insn.
10420 + The contents of this vector are what will be used to convert the insn
10421 + template into assembler code, so you can change the assembler output
10422 + by changing the contents of the vector. */
10423 +#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
10424 + avr32_final_prescan_insn ((INSN), (OPVEC), (NOPERANDS))
10425 +
10426 +/*
10427 +A C expression which evaluates to true if CODE is a valid
10428 +punctuation character for use in the PRINT_OPERAND macro. If
10429 +PRINT_OPERAND_PUNCT_VALID_P is not defined, it means that no
10430 +punctuation characters (except for the standard one, '%') are used
10431 +in this way.
10432 +*/
10433 +/*
10434 + 'm' refers to the most significant word in a two-register mode.
10435 +*/
10436 +#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == 'm' || (CODE) == 'e')
10437 +
10438 +/*
10439 +A C compound statement to output to stdio stream STREAM the
10440 +assembler syntax for an instruction operand that is a memory reference
10441 +whose address is X. X is an RTL expression.
10442 +
10443 +On some machines, the syntax for a symbolic address depends on the
10444 +section that the address refers to. On these machines, define the macro
10445 +ENCODE_SECTION_INFO to store the information into the
10446 +symbol_ref, and then check for it here. (see Assembler Format.)
10447 +*/
10448 +#define PRINT_OPERAND_ADDRESS(STREAM, X) avr32_print_operand_address(STREAM, X)
10449 +
10450 +
10451 +/** Output of Dispatch Tables **/
10452 +
10453 +/*
10454 + * A C statement to output to the stdio stream stream an assembler
10455 + * pseudo-instruction to generate a difference between two
10456 + * labels. value and rel are the numbers of two internal labels. The
10457 + * definitions of these labels are output using
10458 + * (*targetm.asm_out.internal_label), and they must be printed in the
10459 + * same way here. For example,
10460 + *
10461 + * fprintf (stream, "\t.word L%d-L%d\n",
10462 + * value, rel)
10463 + *
10464 + * You must provide this macro on machines where the addresses in a
10465 + * dispatch table are relative to the table's own address. If defined,
10466 + * GCC will also use this macro on all machines when producing
10467 + * PIC. body is the body of the ADDR_DIFF_VEC; it is provided so that
10468 + * the mode and flags can be read.
10469 + */
10470 +#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
10471 + fprintf(STREAM, "\tbral\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
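+/* For illustration: assuming LOCAL_LABEL_PREFIX is ".", the table entry for
+ internal label 5 is emitted as
+ bral .L5
+ so a PIC dispatch table is a sequence of branch instructions rather than
+ address words, consistent with JUMP_TABLES_IN_TEXT_SECTION above. */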
10472 +
10473 +/*
10474 +This macro should be provided on machines where the addresses
10475 +in a dispatch table are absolute.
10476 +
10477 +The definition should be a C statement to output to the stdio stream
10478 +STREAM an assembler pseudo-instruction to generate a reference to
10479 +a label. VALUE is the number of an internal label whose
10480 +definition is output using ASM_OUTPUT_INTERNAL_LABEL.
10481 +For example,
10482 +
10483 +fprintf(STREAM, "\t.word L%d\n", VALUE)
10484 +*/
10485 +
10486 +#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
10487 + fprintf(STREAM, "\t.long %sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
10488 +
10489 +/** Assembler Commands for Exception Regions */
10490 +
10491 +/* ToDo: All of this subsection */
10492 +
10493 +/** Assembler Commands for Alignment */
10494 +
10495 +
10496 +/*
10497 +A C statement to output to the stdio stream STREAM an assembler
10498 +command to advance the location counter to a multiple of 2 raised to the
10499 +power POWER, in bytes. POWER will be a C expression of type int.
10500 +*/
10501 +#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
10502 + do \
10503 + { \
10504 + if ((POWER) != 0) \
10505 + fprintf(STREAM, "\t.align\t%d\n", POWER); \
10506 + } \
10507 + while (0)
10508 +
10509 +/*
10510 +Like ASM_OUTPUT_ALIGN, except that the "nop" instruction is used for padding, if
10511 +necessary.
10512 +*/
10513 +#define ASM_OUTPUT_ALIGN_WITH_NOP(STREAM, POWER) \
10514 + fprintf(STREAM, "\t.balignw\t%d, 0xd703\n", (1 << POWER))
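+/* For illustration: with POWER equal to 2 this emits
+ .balignw 4, 0xd703
+ advancing to a 4-byte boundary and filling the padding with the halfword
+ 0xd703 (presumably the encoding of the AVR32 "nop" instruction). */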
10515 +
10516 +
10517 +
10518 +/******************************************************************************
10519 + * Controlling Debugging Information Format
10520 + *****************************************************************************/
10521 +
10522 +/* How to renumber registers for dbx and gdb. */
10523 +#define DBX_REGISTER_NUMBER(REGNO) ASM_REGNUM (REGNO)
10524 +
10525 +/* The DWARF 2 CFA column which tracks the return address. */
10526 +#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM(LR_REGNUM)
10527 +
10528 +/*
10529 +Define this macro if GCC should produce dwarf version 2 format
10530 +debugging output in response to the -g option.
10531 +
10532 +To support optional call frame debugging information, you must also
10533 +define INCOMING_RETURN_ADDR_RTX and either set
10534 +RTX_FRAME_RELATED_P on the prologue insns if you use RTL for the
10535 +prologue, or call dwarf2out_def_cfa and dwarf2out_reg_save
10536 +as appropriate from TARGET_ASM_FUNCTION_PROLOGUE if you don't.
10537 +*/
10538 +#define DWARF2_DEBUGGING_INFO 1
10539 +
10540 +
10541 +#define DWARF2_ASM_LINE_DEBUG_INFO 1
10542 +#define DWARF2_FRAME_INFO 1
10543 +
10544 +
10545 +/******************************************************************************
10546 + * Miscellaneous Parameters
10547 + *****************************************************************************/
10548 +
10549 +/* ToDo: a lot */
10550 +
10551 +/*
10552 +An alias for a machine mode name. This is the machine mode that
10553 +elements of a jump-table should have.
10554 +*/
10555 +#define CASE_VECTOR_MODE SImode
10556 +
10557 +/*
10558 +Define this macro to be a C expression to indicate when jump-tables
10559 +should contain relative addresses. If jump-tables never contain
10560 +relative addresses, then you need not define this macro.
10561 +*/
10562 +#define CASE_VECTOR_PC_RELATIVE 0
10563 +
10564 +/*
10565 +The maximum number of bytes that a single instruction can move quickly
10566 +between memory and registers or between two memory locations.
10567 +*/
10568 +#define MOVE_MAX (2*UNITS_PER_WORD)
10569 +
10570 +
10571 +/* A C expression that is nonzero if on this machine the number of bits actually used
10572 + for the count of a shift operation is equal to the number of bits needed to represent
10573 + the size of the object being shifted. When this macro is nonzero, the compiler will
10574 + assume that it is safe to omit a sign-extend, zero-extend, and certain bitwise 'and'
10575 + instructions that truncate the count of a shift operation. On machines that have
10576 + instructions that act on bit-fields at variable positions, which may include
10577 + 'bit test' instructions, a nonzero SHIFT_COUNT_TRUNCATED also enables
10578 + deletion of truncations of the values that serve as arguments to bit-field
10579 + instructions.
10580 + If both types of instructions truncate the count (for shifts) and position (for bit-field
10581 + operations), or if no variable-position bit-field instructions exist, you should define
10582 + this macro.
10583 + However, on some machines, such as the 80386 and the 680x0, truncation only applies
10584 + to shift operations and not the (real or pretended) bit-field operations. Define
10585 + SHIFT_COUNT_TRUNCATED to be zero on such machines. Instead, add patterns to the
10586 + 'md' file that include the implied truncation of the shift instructions.
10587 + You need not define this macro if it would always have the value of zero. */
10588 +#define SHIFT_COUNT_TRUNCATED 1
10589 +
10590 +/*
10591 +A C expression which is nonzero if on this machine it is safe to
10592 +convert an integer of INPREC bits to one of OUTPREC
10593 +bits (where OUTPREC is smaller than INPREC) by merely
10594 +operating on it as if it had only OUTPREC bits.
10595 +
10596 +On many machines, this expression can be 1.
10597 +
10598 +When TRULY_NOOP_TRUNCATION returns 1 for a pair of sizes for
10599 +modes for which MODES_TIEABLE_P is 0, suboptimal code can result.
10600 +If this is the case, making TRULY_NOOP_TRUNCATION return 0 in
10601 +such cases may improve things.
10602 +*/
10603 +#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
10604 +
10605 +/*
10606 +An alias for the machine mode for pointers. On most machines, define
10607 +this to be the integer mode corresponding to the width of a hardware
10608 +pointer; SImode on 32-bit machine or DImode on 64-bit machines.
10609 +On some machines you must define this to be one of the partial integer
10610 +modes, such as PSImode.
10611 +
10612 +The width of Pmode must be at least as large as the value of
10613 +POINTER_SIZE. If it is not equal, you must define the macro
10614 +POINTERS_EXTEND_UNSIGNED to specify how pointers are extended
10615 +to Pmode.
10616 +*/
10617 +#define Pmode SImode
10618 +
10619 +/*
10620 +An alias for the machine mode used for memory references to functions
10621 +being called, in call RTL expressions. On most machines this
10622 +should be QImode.
10623 +*/
10624 +#define FUNCTION_MODE SImode
10625 +
10626 +
10627 +#define REG_S_P(x) \
10628 + (REG_P (x) || (GET_CODE (x) == SUBREG && REG_P (XEXP (x, 0))))
10629 +
10630 +
10631 +/* If defined, modifies the length assigned to instruction INSN as a
10632 + function of the context in which it is used. LENGTH is an lvalue
10633 + that contains the initially computed length of the insn and should
10634 + be updated with the correct length of the insn. */
10635 +#define ADJUST_INSN_LENGTH(INSN, LENGTH) \
10636 + ((LENGTH) = avr32_adjust_insn_length ((INSN), (LENGTH)))
10637 +
10638 +
10639 +#define CLZ_DEFINED_VALUE_AT_ZERO(mode, value) \
10640 + (value = 32, (mode == SImode))
10641 +
10642 +#define CTZ_DEFINED_VALUE_AT_ZERO(mode, value) \
10643 + (value = 32, (mode == SImode))
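+/* Note: both macros above assign 32 to 'value' and then evaluate to nonzero
+ only for SImode, i.e. a 32-bit clz/ctz of zero is treated as well defined
+ with the result 32. */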
10644 +
10645 +#define UNITS_PER_SIMD_WORD UNITS_PER_WORD
10646 +
10647 +#define STORE_FLAG_VALUE 1
10648 +
10649 +enum avr32_builtins
10650 +{
10651 + AVR32_BUILTIN_MTSR,
10652 + AVR32_BUILTIN_MFSR,
10653 + AVR32_BUILTIN_MTDR,
10654 + AVR32_BUILTIN_MFDR,
10655 + AVR32_BUILTIN_CACHE,
10656 + AVR32_BUILTIN_SYNC,
10657 + AVR32_BUILTIN_TLBR,
10658 + AVR32_BUILTIN_TLBS,
10659 + AVR32_BUILTIN_TLBW,
10660 + AVR32_BUILTIN_BREAKPOINT,
10661 + AVR32_BUILTIN_XCHG,
10662 + AVR32_BUILTIN_LDXI,
10663 + AVR32_BUILTIN_BSWAP16,
10664 + AVR32_BUILTIN_BSWAP32,
10665 + AVR32_BUILTIN_COP,
10666 + AVR32_BUILTIN_MVCR_W,
10667 + AVR32_BUILTIN_MVRC_W,
10668 + AVR32_BUILTIN_MVCR_D,
10669 + AVR32_BUILTIN_MVRC_D,
10670 + AVR32_BUILTIN_MULSATHH_H,
10671 + AVR32_BUILTIN_MULSATHH_W,
10672 + AVR32_BUILTIN_MULSATRNDHH_H,
10673 + AVR32_BUILTIN_MULSATRNDWH_W,
10674 + AVR32_BUILTIN_MULSATWH_W,
10675 + AVR32_BUILTIN_MACSATHH_W,
10676 + AVR32_BUILTIN_SATADD_H,
10677 + AVR32_BUILTIN_SATSUB_H,
10678 + AVR32_BUILTIN_SATADD_W,
10679 + AVR32_BUILTIN_SATSUB_W,
10680 + AVR32_BUILTIN_MULWH_D,
10681 + AVR32_BUILTIN_MULNWH_D,
10682 + AVR32_BUILTIN_MACWH_D,
10683 + AVR32_BUILTIN_MACHH_D,
10684 + AVR32_BUILTIN_MUSFR,
10685 + AVR32_BUILTIN_MUSTR,
10686 + AVR32_BUILTIN_SATS,
10687 + AVR32_BUILTIN_SATU,
10688 + AVR32_BUILTIN_SATRNDS,
10689 + AVR32_BUILTIN_SATRNDU
10690 +};
10691 +
10692 +
10693 +#define FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE, COMPARISON) \
10694 + ((MODE == SFmode) || (MODE == DFmode))
10695 +
10696 +#define RENAME_LIBRARY_SET ".set"
10697 +
10698 +/* Make ABI_NAME an alias for __GCC_NAME. */
10699 +#define RENAME_LIBRARY(GCC_NAME, ABI_NAME) \
10700 + __asm__ (".globl\t__avr32_" #ABI_NAME "\n" \
10701 + ".set\t__avr32_" #ABI_NAME \
10702 + ", __" #GCC_NAME "\n");
10703 +
10704 +/* Give libgcc functions avr32 ABI name. */
10705 +#ifdef L_muldi3
10706 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (muldi3, mul64)
10707 +#endif
10708 +#ifdef L_divdi3
10709 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (divdi3, sdiv64)
10710 +#endif
10711 +#ifdef L_udivdi3
10712 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (udivdi3, udiv64)
10713 +#endif
10714 +#ifdef L_moddi3
10715 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (moddi3, smod64)
10716 +#endif
10717 +#ifdef L_umoddi3
10718 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (umoddi3, umod64)
10719 +#endif
10720 +#ifdef L_ashldi3
10721 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashldi3, lsl64)
10722 +#endif
10723 +#ifdef L_lshrdi3
10724 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (lshrdi3, lsr64)
10725 +#endif
10726 +#ifdef L_ashrdi3
10727 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashrdi3, asr64)
10728 +#endif
10729 +
10730 +#ifdef L_fixsfdi
10731 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixsfdi, f32_to_s64)
10732 +#endif
10733 +#ifdef L_fixunssfdi
10734 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunssfdi, f32_to_u64)
10735 +#endif
10736 +#ifdef L_floatdidf
10737 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdidf, s64_to_f64)
10738 +#endif
10739 +#ifdef L_floatdisf
10740 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdisf, s64_to_f32)
10741 +#endif
10742 +
10743 +#ifdef L_addsub_sf
10744 +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (addsf3, f32_add); RENAME_LIBRARY (subsf3, f32_sub)
10745 +#endif
10746 +
10747 +#endif
10748 diff -Nur gcc-4.1.2/gcc/config/avr32/avr32.md gcc-4.1.2-owrt/gcc/config/avr32/avr32.md
10749 --- gcc-4.1.2/gcc/config/avr32/avr32.md 1970-01-01 01:00:00.000000000 +0100
10750 +++ gcc-4.1.2-owrt/gcc/config/avr32/avr32.md 2007-05-24 12:03:28.000000000 +0200
10751 @@ -0,0 +1,4694 @@
10752 +;; AVR32 machine description file.
10753 +;; Copyright 2003-2006 Atmel Corporation.
10754 +;;
10755 +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
10756 +;;
10757 +;; This file is part of GCC.
10758 +;;
10759 +;; This program is free software; you can redistribute it and/or modify
10760 +;; it under the terms of the GNU General Public License as published by
10761 +;; the Free Software Foundation; either version 2 of the License, or
10762 +;; (at your option) any later version.
10763 +;;
10764 +;; This program is distributed in the hope that it will be useful,
10765 +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
10766 +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10767 +;; GNU General Public License for more details.
10768 +;;
10769 +;; You should have received a copy of the GNU General Public License
10770 +;; along with this program; if not, write to the Free Software
10771 +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
10772 +
10773 +;; -*- Mode: Scheme -*-
10774 +
10775 +(define_attr "type" "alu,alu2,alu_sat,mulhh,mulwh,mulww_w,mulww_d,div,machh_w,macww_w,macww_d,branch,call,load,load_rm,store,load2,load4,store2,store4,fmul,fcmps,fcmpd,fcast,fmv,fmvcpu,fldd,fstd,flds,fsts,fstm"
10776 + (const_string "alu"))
10777 +
10778 +
10779 +(define_attr "cc" "none,set_vncz,set_ncz,set_cz,set_z,bld,compare,clobber,call_set,fpcompare,from_fpcc"
10780 + (const_string "none"))
10781 +
10782 +
10783 +(define_attr "pipeline" "ap,uc"
10784 + (const_string "ap"))
10785 +
10786 +(define_attr "length" ""
10787 + (const_int 4))
10788 +
10789 +
10790 +;; Uses of UNSPEC in this file:
10791 +(define_constants
10792 + [(UNSPEC_PUSHM 0)
10793 + (UNSPEC_POPM 1)
10794 + (UNSPEC_UDIVMODSI4_INTERNAL 2)
10795 + (UNSPEC_DIVMODSI4_INTERNAL 3)
10796 + (UNSPEC_STM 4)
10797 + (UNSPEC_LDM 5)
10798 + (UNSPEC_MOVSICC 6)
10799 + (UNSPEC_ADDSICC 7)
10800 + (UNSPEC_COND_MI 8)
10801 + (UNSPEC_COND_PL 9)
10802 + (UNSPEC_PIC_SYM 10)
10803 + (UNSPEC_PIC_BASE 11)
10804 + (UNSPEC_STORE_MULTIPLE 12)
10805 + (UNSPEC_STMFP 13)
10806 + (UNSPEC_FPCC_TO_REG 14)
10807 + (UNSPEC_REG_TO_CC 15)
10808 + (UNSPEC_FORCE_MINIPOOL 16)
10809 + (UNSPEC_SATS 17)
10810 + (UNSPEC_SATU 18)
10811 + (UNSPEC_SATRNDS 19)
10812 + (UNSPEC_SATRNDU 20)
10813 + ])
10814 +
10815 +(define_constants
10816 + [(VUNSPEC_EPILOGUE 0)
10817 + (VUNSPEC_CACHE 1)
10818 + (VUNSPEC_MTSR 2)
10819 + (VUNSPEC_MFSR 3)
10820 + (VUNSPEC_BLOCKAGE 4)
10821 + (VUNSPEC_SYNC 5)
10822 + (VUNSPEC_TLBR 6)
10823 + (VUNSPEC_TLBW 7)
10824 + (VUNSPEC_TLBS 8)
10825 + (VUNSPEC_BREAKPOINT 9)
10826 + (VUNSPEC_MTDR 10)
10827 + (VUNSPEC_MFDR 11)
10828 + (VUNSPEC_MVCR 12)
10829 + (VUNSPEC_MVRC 13)
10830 + (VUNSPEC_COP 14)
10831 + (VUNSPEC_ALIGN 15)
10832 + (VUNSPEC_POOL_START 16)
10833 + (VUNSPEC_POOL_END 17)
10834 + (VUNSPEC_POOL_4 18)
10835 + (VUNSPEC_POOL_8 19)
10836 + (VUNSPEC_MUSFR 20)
10837 + (VUNSPEC_MUSTR 21)
10838 + ])
10839 +
10840 +(define_constants
10841 + [
10842 + ;; R7 = 15-7 = 8
10843 + (FP_REGNUM 8)
10844 + ;; Return Register = R12 = 15 - 12 = 3
10845 + (RETVAL_REGNUM 3)
10846 + ;; SP = R13 = 15 - 13 = 2
10847 + (SP_REGNUM 2)
10848 + ;; LR = R14 = 15 - 14 = 1
10849 + (LR_REGNUM 1)
10850 + ;; PC = R15 = 15 - 15 = 0
10851 + (PC_REGNUM 0)
10852 + ;; FPSR = GENERAL_REGS + 1 = 17
10853 + (FPCC_REGNUM 17)
10854 + ])
10855 +
10856 +
10857 +
10858 +
10859 +;;******************************************************************************
10860 +;; Macros
10861 +;;******************************************************************************
10862 +
10863 +;; Integer Modes for basic alu insns
10864 +(define_mode_macro INTM [SI HI QI])
10865 +(define_mode_attr alu_cc_attr [(SI "set_vncz") (HI "clobber") (QI "clobber")])
10866 +
10867 +;; Move word modes
10868 +(define_mode_macro MOVM [SI V2HI V4QI])
10869 +
10870 +;; For mov/addcc insns
10871 +(define_mode_macro ADDCC [SI HI QI])
10872 +(define_mode_macro MOVCC [SI HI QI])
10873 +(define_mode_macro CMP [DI SI HI QI])
10874 +(define_mode_attr cmp_constraint [(DI "r") (SI "rKs21") (HI "r") (QI "r")])
10875 +(define_mode_attr cmp_predicate [(DI "register_operand")
10876 + (SI "register_immediate_operand")
10877 + (HI "register_operand")
10878 + (QI "register_operand")])
10879 +
10880 +;; For all conditional insns
10881 +(define_code_macro any_cond [eq ne gt ge lt le gtu geu ltu leu])
10882 +(define_code_attr cond [(eq "eq") (ne "ne") (gt "gt") (ge "ge") (lt "lt") (le "le")
10883 + (gtu "hi") (geu "hs") (ltu "lo") (leu "ls")])
10884 +(define_code_attr invcond [(eq "ne") (ne "eq") (gt "le") (ge "lt") (lt "ge") (le "gt")
10885 + (gtu "ls") (geu "lo") (ltu "hs") (leu "hi")])
10886 +
10887 +;; For logical operations
10888 +(define_code_macro logical [and ior xor])
10889 +(define_code_attr logical_insn [(and "and") (ior "or") (xor "eor")])
10890 +
10891 +;; Load the predicates
10892 +(include "predicates.md")
10893 +
10894 +
10895 +;;******************************************************************************
10896 +;; Automaton pipeline description for avr32
10897 +;;******************************************************************************
10898 +
10899 +(define_automaton "avr32_ap")
10900 +
10901 +
10902 +(define_cpu_unit "is" "avr32_ap")
10903 +(define_cpu_unit "a1,m1,da" "avr32_ap")
10904 +(define_cpu_unit "a2,m2,d" "avr32_ap")
10905 +
10906 +;;Alu instructions
10907 +(define_insn_reservation "alu_op" 1
10908 + (and (eq_attr "pipeline" "ap")
10909 + (eq_attr "type" "alu"))
10910 + "is,a1,a2")
10911 +
10912 +(define_insn_reservation "alu2_op" 2
10913 + (and (eq_attr "pipeline" "ap")
10914 + (eq_attr "type" "alu2"))
10915 + "is,is+a1,a1+a2,a2")
10916 +
10917 +(define_insn_reservation "alu_sat_op" 2
10918 + (and (eq_attr "pipeline" "ap")
10919 + (eq_attr "type" "alu_sat"))
10920 + "is,a1,a2")
10921 +
10922 +
10923 +;;Mul instructions
10924 +(define_insn_reservation "mulhh_op" 2
10925 + (and (eq_attr "pipeline" "ap")
10926 + (eq_attr "type" "mulhh,mulwh"))
10927 + "is,m1,m2")
10928 +
10929 +(define_insn_reservation "mulww_w_op" 3
10930 + (and (eq_attr "pipeline" "ap")
10931 + (eq_attr "type" "mulww_w"))
10932 + "is,m1,m1+m2,m2")
10933 +
10934 +(define_insn_reservation "mulww_d_op" 5
10935 + (and (eq_attr "pipeline" "ap")
10936 + (eq_attr "type" "mulww_d"))
10937 + "is,m1,m1+m2,m1+m2,m2,m2")
10938 +
10939 +(define_insn_reservation "div_op" 33
10940 + (and (eq_attr "pipeline" "ap")
10941 + (eq_attr "type" "div"))
10942 + "is,m1,m1*31 + m2*31,m2")
10943 +
10944 +(define_insn_reservation "machh_w_op" 3
10945 + (and (eq_attr "pipeline" "ap")
10946 + (eq_attr "type" "machh_w"))
10947 + "is*2,m1,m2")
10948 +
10949 +
10950 +(define_insn_reservation "macww_w_op" 4
10951 + (and (eq_attr "pipeline" "ap")
10952 + (eq_attr "type" "macww_w"))
10953 + "is*2,m1,m1,m2")
10954 +
10955 +
10956 +(define_insn_reservation "macww_d_op" 6
10957 + (and (eq_attr "pipeline" "ap")
10958 + (eq_attr "type" "macww_d"))
10959 + "is*2,m1,m1+m2,m1+m2,m2")
10960 +
10961 +;;Bypasses for Mac instructions, because of accumulator cache.
10962 +;;Set the latency as low as possible so that the compiler places
10963 +;;mul -> mac and mac -> mac combinations which use the same
10964 +;;accumulator cache close together, keeping out any intervening
10965 +;;instructions which could ruin the accumulator cache.
10966 +(define_bypass 4 "machh_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
10967 +(define_bypass 5 "macww_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
10968 +(define_bypass 7 "macww_d_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
10969 +
10970 +(define_bypass 3 "mulhh_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
10971 +(define_bypass 4 "mulww_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
10972 +(define_bypass 6 "mulww_d_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
10973 +
10974 +
10975 +;;Bypasses for all mul/mac instructions followed by an instruction
10976 +;;which reads the output AND writes the result to the same register.
10977 +;;This will generate a Write After Write hazard which gives an
10978 +;;extra cycle before the result is ready.
10979 +(define_bypass 0 "machh_w_op" "machh_w_op" "avr32_valid_macmac_bypass")
10980 +(define_bypass 0 "macww_w_op" "macww_w_op" "avr32_valid_macmac_bypass")
10981 +(define_bypass 0 "macww_d_op" "macww_d_op" "avr32_valid_macmac_bypass")
10982 +
10983 +(define_bypass 0 "mulhh_op" "machh_w_op" "avr32_valid_mulmac_bypass")
10984 +(define_bypass 0 "mulww_w_op" "macww_w_op" "avr32_valid_mulmac_bypass")
10985 +(define_bypass 0 "mulww_d_op" "macww_d_op" "avr32_valid_mulmac_bypass")
10986 +
10987 +;;Branch and call instructions
10988 +;;We assume that all branches and rcalls are predicted correctly :-)
10989 +;;while calls use a lot of cycles.
10990 +(define_insn_reservation "branch_op" 0
10991 + (and (eq_attr "pipeline" "ap")
10992 + (eq_attr "type" "branch"))
10993 + "nothing")
10994 +
10995 +(define_insn_reservation "call_op" 10
10996 + (and (eq_attr "pipeline" "ap")
10997 + (eq_attr "type" "call"))
10998 + "nothing")
10999 +
11000 +
11001 +;;Load store instructions
11002 +(define_insn_reservation "load_op" 2
11003 + (and (eq_attr "pipeline" "ap")
11004 + (eq_attr "type" "load"))
11005 + "is,da,d")
11006 +
11007 +(define_insn_reservation "load_rm_op" 3
11008 + (and (eq_attr "pipeline" "ap")
11009 + (eq_attr "type" "load_rm"))
11010 + "is,da,d")
11011 +
11012 +
11013 +(define_insn_reservation "store_op" 0
11014 + (and (eq_attr "pipeline" "ap")
11015 + (eq_attr "type" "store"))
11016 + "is,da,d")
11017 +
11018 +
11019 +(define_insn_reservation "load_double_op" 3
11020 + (and (eq_attr "pipeline" "ap")
11021 + (eq_attr "type" "load2"))
11022 + "is,da,da+d,d")
11023 +
11024 +(define_insn_reservation "load_quad_op" 4
11025 + (and (eq_attr "pipeline" "ap")
11026 + (eq_attr "type" "load4"))
11027 + "is,da,da+d,da+d,d")
11028 +
11029 +(define_insn_reservation "store_double_op" 0
11030 + (and (eq_attr "pipeline" "ap")
11031 + (eq_attr "type" "store2"))
11032 + "is,da,da+d,d")
11033 +
11034 +
11035 +(define_insn_reservation "store_quad_op" 0
11036 + (and (eq_attr "pipeline" "ap")
11037 + (eq_attr "type" "store4"))
11038 + "is,da,da+d,da+d,d")
11039 +
11040 +;;For stores the operand to write to memory is read in d, and
11041 +;;the real latency between any instruction and a store is therefore
11042 +;;one less than for the instructions which read their operands in the first
11043 +;;execution stage.
11044 +(define_bypass 2 "load_double_op" "store_double_op" "avr32_store_bypass")
11045 +(define_bypass 3 "load_quad_op" "store_quad_op" "avr32_store_bypass")
11046 +(define_bypass 1 "load_op" "store_op" "avr32_store_bypass")
11047 +(define_bypass 2 "load_rm_op" "store_op" "avr32_store_bypass")
11048 +(define_bypass 1 "alu_sat_op" "store_op" "avr32_store_bypass")
11049 +(define_bypass 1 "alu2_op" "store_op" "avr32_store_bypass")
11050 +(define_bypass 1 "mulhh_op" "store_op" "avr32_store_bypass")
11051 +(define_bypass 2 "mulww_w_op" "store_op" "avr32_store_bypass")
11052 +(define_bypass 4 "mulww_d_op" "store_op" "avr32_store_bypass" )
11053 +(define_bypass 2 "machh_w_op" "store_op" "avr32_store_bypass")
11054 +(define_bypass 3 "macww_w_op" "store_op" "avr32_store_bypass")
11055 +(define_bypass 5 "macww_d_op" "store_op" "avr32_store_bypass")
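+;; For illustration (reading the bypasses above): a value produced by a plain
+;; "load_op" normally has latency 2, but when the consumer is a "store_op" the
+;; store does not need its data until the d stage, so the effective latency is
+;; reduced to 1.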
11056 +
11057 +
11058 +; Bypass for load double operation. If only the first loaded word is needed
11059 +; then the latency is 2
11060 +(define_bypass 2 "load_double_op"
11061 + "load_op,load_rm_op,alu_sat_op, alu2_op, alu_op, mulhh_op, mulww_w_op,
11062 + mulww_d_op, machh_w_op, macww_w_op, macww_d_op"
11063 + "avr32_valid_load_double_bypass")
11064 +
11065 +; Bypass for load quad operation. If only the first or second loaded word is needed
11066 +; we set the latency to 2
11067 +(define_bypass 2 "load_quad_op"
11068 + "load_op,load_rm_op,alu_sat_op, alu2_op, alu_op, mulhh_op, mulww_w_op,
11069 + mulww_d_op, machh_w_op, macww_w_op, macww_d_op"
11070 + "avr32_valid_load_quad_bypass")
11071 +
11072 +
11073 +;;******************************************************************************
11074 +;; End of Automaton pipeline description for avr32
11075 +;;******************************************************************************
11076 +
11077 +
11078 +
11079 +;;=============================================================================
11080 +;; move
11081 +;;-----------------------------------------------------------------------------
11082 +
11083 +;;== char - 8 bits ============================================================
11084 +(define_expand "movqi"
11085 + [(set (match_operand:QI 0 "nonimmediate_operand" "")
11086 + (match_operand:QI 1 "general_operand" ""))]
11087 + ""
11088 + {
11089 + if ( !no_new_pseudos ){
11090 + if (GET_CODE (operands[1]) == MEM && optimize){
11091 + rtx reg = gen_reg_rtx (SImode);
11092 +
11093 + emit_insn (gen_zero_extendqisi2 (reg, operands[1]));
11094 + operands[1] = gen_lowpart (QImode, reg);
11095 + }
11096 +
11097 + /* One of the ops has to be in a register. */
11098 + if (GET_CODE (operands[0]) == MEM)
11099 + operands[1] = force_reg (QImode, operands[1]);
11100 + }
11101 +
11102 + })
11103 +
11104 +(define_insn "*movqi_internal"
11105 + [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m,r")
11106 + (match_operand:QI 1 "general_operand" "rKs08,m,r,i"))]
11107 + ""
11108 + "@
11109 + mov\t%0, %1
11110 + ld.ub\t%0, %1
11111 + st.b\t%0, %1
11112 + mov\t%0, %1"
11113 + [(set_attr "length" "2,4,4,4")
11114 + (set_attr "type" "alu,load_rm,store,alu")])
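+;; For illustration: with this pattern a small constant such as 5 is moved with
+;; the 2-byte "mov" of alternative 0 (constraint Ks08), a load from memory uses
+;; "ld.ub", a store uses "st.b", and any other immediate falls back to the
+;; 4-byte "mov" of alternative 3.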
11115 +
11116 +
11117 +
11118 +;;== short - 16 bits ==========================================================
11119 +(define_expand "movhi"
11120 + [(set (match_operand:HI 0 "nonimmediate_operand" "")
11121 + (match_operand:HI 1 "general_operand" ""))]
11122 + ""
11123 + {
11124 + if ( !no_new_pseudos ){
11125 + if (GET_CODE (operands[1]) == MEM && optimize){
11126 + rtx reg = gen_reg_rtx (SImode);
11127 +
11128 + emit_insn (gen_extendhisi2 (reg, operands[1]));
11129 + operands[1] = gen_lowpart (HImode, reg);
11130 + }
11131 +
11132 + /* One of the ops has to be in a register. */
11133 + if (GET_CODE (operands[0]) == MEM)
11134 + operands[1] = force_reg (HImode, operands[1]);
11135 + }
11136 +
11137 + })
11138 +
11139 +(define_insn "*movhi_internal"
11140 + [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r")
11141 + (match_operand:HI 1 "general_operand" "rKs08,m,r,i"))]
11142 + ""
11143 + "@
11144 + mov\t%0, %1
11145 + ld.sh\t%0, %1
11146 + st.h\t%0, %1
11147 + mov\t%0, %1"
11148 + [(set_attr "length" "2,4,4,4")
11149 + (set_attr "type" "alu,load_rm,store,alu")])
11150 +
11151 +
11152 +;;== int - 32 bits ============================================================
11153 +
11154 +(define_expand "movmisalignsi"
11155 + [(set (match_operand:SI 0 "nonimmediate_operand" "")
11156 + (match_operand:SI 1 "nonimmediate_operand" ""))]
11157 + "TARGET_UNALIGNED_WORD"
11158 + {
11159 + }
11160 +)
11161 +
11162 +(define_expand "mov<mode>"
11163 + [(set (match_operand:MOVM 0 "nonimmediate_operand" "")
11164 + (match_operand:MOVM 1 "general_operand" ""))]
11165 + ""
11166 + {
11167 +
11168 + /* One of the ops has to be in a register. */
11169 + if (GET_CODE (operands[0]) == MEM)
11170 + operands[1] = force_reg (<MODE>mode, operands[1]);
11171 +
11172 +
11173 + /* Check for out of range immediate constants as these may
11174 + occur during reloading, since it seems like reload does
11175 + not check if the immediate is legitimate. Don't know if
11176 + this is a bug? */
11177 + if ( reload_in_progress
11178 + && GET_CODE(operands[1]) == CONST_INT
11179 + && !avr32_const_ok_for_constraint_p(INTVAL(operands[1]), 'K', "Ks21") ){
11180 + operands[1] = force_const_mem(SImode, operands[1]);
11181 + }
11182 +
11183 + if ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
11184 + && !avr32_legitimate_pic_operand_p(operands[1]) )
11185 + operands[1] = legitimize_pic_address (operands[1], <MODE>mode,
11186 + (no_new_pseudos ? operands[0] : 0));
11187 + else if ( flag_pic && avr32_address_operand(operands[1], GET_MODE(operands[1])) )
11188 + /* If we have an address operand then this function uses the pic register. */
11189 + current_function_uses_pic_offset_table = 1;
11190 + })
11191 +
11192 +
11193 +(define_insn "mov<mode>_internal"
11194 + [(set (match_operand:MOVM 0 "nonimmediate_operand" "=r,r,r,m,r")
11195 + (match_operand:MOVM 1 "general_operand" "rKs08,Ks21,m,r,W"))]
11196 + ""
11197 + {
11198 + switch (which_alternative) {
11199 + case 0:
11200 + case 1: return "mov\t%0, %1";
11201 + case 2:
11202 + if ( (REG_P(XEXP(operands[1], 0))
11203 + && REGNO(XEXP(operands[1], 0)) == SP_REGNUM)
11204 + || (GET_CODE(XEXP(operands[1], 0)) == PLUS
11205 + && REGNO(XEXP(XEXP(operands[1], 0), 0)) == SP_REGNUM
11206 + && GET_CODE(XEXP(XEXP(operands[1], 0), 1)) == CONST_INT
11207 + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) % 4 == 0
11208 + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) <= 0x1FC) )
11209 + return "lddsp\t%0, %1";
11210 + else if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])) )
11211 + return "lddpc\t%0, %1";
11212 + else
11213 + return "ld.w\t%0, %1";
11214 + case 3:
11215 + if ( (REG_P(XEXP(operands[0], 0))
11216 + && REGNO(XEXP(operands[0], 0)) == SP_REGNUM)
11217 + || (GET_CODE(XEXP(operands[0], 0)) == PLUS
11218 + && REGNO(XEXP(XEXP(operands[0], 0), 0)) == SP_REGNUM
11219 + && GET_CODE(XEXP(XEXP(operands[0], 0), 1)) == CONST_INT
11220 + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) % 4 == 0
11221 + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) <= 0x1FC) )
11222 + return "stdsp\t%0, %1";
11223 + else
11224 + return "st.w\t%0, %1";
11225 + case 4:
11226 + if ( TARGET_HAS_ASM_ADDR_PSEUDOS )
11227 + return "lda.w\t%0, %1";
11228 + else
11229 + return "ld.w\t%0, r6[%1@got]";
11230 + default:
11231 + abort();
11232 + }
11233 + }
11234 +
11235 + [(set_attr "length" "2,4,4,4,8")
11236 + (set_attr "type" "alu,alu,load,store,load")
11237 + (set_attr "cc" "none,none,none,none,clobber")])
11238 +
11239 +
11240 +;; These instructions are for loading constants which cannot be loaded
11241 +;; directly from the constant pool because the offset is too large.
11242 +;; high and lo_sum are used even though for our case it should be
11243 +;; low and high sum :-)
11244 +(define_insn "mov_symbol_lo"
11245 + [(set (match_operand:SI 0 "register_operand" "=r")
11246 + (high:SI (match_operand:SI 1 "immediate_operand" "i" )))]
11247 + ""
11248 + "mov\t%0, lo(%1)"
11249 + [(set_attr "type" "alu")
11250 + (set_attr "length" "4")]
11251 +)
11252 +
11253 +(define_insn "add_symbol_hi"
11254 + [(set (match_operand:SI 0 "register_operand" "=r")
11255 + (lo_sum:SI (match_dup 0)
11256 + (match_operand:SI 1 "immediate_operand" "i" )))]
11257 + ""
11258 + "orh\t%0, hi(%1)"
11259 + [(set_attr "type" "alu")
11260 + (set_attr "length" "4")]
11261 +)
11262 +
11263 +
11264 +
11265 +;; When generating pic, we need to load the symbol offset into a register.
11266 +;; So that the optimizer does not confuse this with a normal symbol load
11267 +;; we use an unspec. The offset will be loaded from a constant pool entry,
11268 +;; since that is the only type of relocation we can use.
11269 +(define_insn "pic_load_addr"
11270 + [(set (match_operand:SI 0 "register_operand" "=r")
11271 + (unspec:SI [(match_operand:SI 1 "" "")] UNSPEC_PIC_SYM))]
11272 + "flag_pic && CONSTANT_POOL_ADDRESS_P(XEXP(operands[1], 0))"
11273 + "lddpc\t%0, %1"
11274 + [(set_attr "type" "load")
11275 + (set_attr "length" "4")]
11276 +)
11277 +
11278 +(define_insn "pic_compute_got_from_pc"
11279 + [(set (match_operand:SI 0 "register_operand" "+r")
11280 + (unspec:SI [(minus:SI (pc)
11281 + (match_dup 0))] UNSPEC_PIC_BASE))
11282 + (use (label_ref (match_operand 1 "" "")))]
11283 + "flag_pic"
11284 + {
11285 + (*targetm.asm_out.internal_label) (asm_out_file, "L",
11286 + CODE_LABEL_NUMBER (operands[1]));
11287 + return \"rsub\t%0, pc\";
11288 + }
11289 + [(set_attr "cc" "clobber")
11290 + (set_attr "length" "2")]
11291 +)
11292 +
11293 +;;== long long int - 64 bits ==================================================
11294 +(define_expand "movdi"
11295 + [(set (match_operand:DI 0 "nonimmediate_operand" "")
11296 + (match_operand:DI 1 "general_operand" ""))]
11297 + ""
11298 + {
11299 +
11300 + /* One of the ops has to be in a register. */
11301 + if (GET_CODE (operands[0]) != REG)
11302 + operands[1] = force_reg (DImode, operands[1]);
11303 +
11304 + })
11305 +
11306 +
11307 +(define_insn_and_split "*movdi_internal"
11308 + [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,r,r,m")
11309 + (match_operand:DI 1 "general_operand" "r,Ks08,Ks21,G,m,r"))]
11310 + ""
11311 + {
11312 + switch (which_alternative ){
11313 + case 1:
11314 + case 2:
11315 + if ( INTVAL(operands[1]) < 0 )
11316 + return "mov\t%0, %1\;mov\t%m0, -1";
11317 + else
11318 + return "mov\t%0, %1\;mov\t%m0, 0";
11319 + case 0:
11320 + case 3:
11321 + return "mov\t%0, %1\;mov\t%m0, %m1";
11322 + case 4:
11323 + if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])))
11324 + return "ld.d\t%0, pc[%1 - .]";
11325 + else
11326 + return "ld.d\t%0, %1";
11327 + case 5:
11328 + return "st.d\t%0, %1";
11329 + default:
11330 + abort();
11331 + }
11332 + }
11333 + "reload_completed &&
11334 + (REG_P(operands[0]) &&
11335 + (REG_P(operands[1]) || avr32_const_double_immediate(operands[1]) ||
11336 + ((GET_CODE(operands[1]) == CONST_INT) && avr32_const_ok_for_constraint_p(INTVAL(operands[1]), 'K', \"Ks21\")) ))"
11337 + [(set (match_dup 0) (match_dup 1))
11338 + (set (match_dup 2) (match_dup 3))]
11339 + {
11340 + operands[2] = gen_highpart (SImode, operands[0]);
11341 + operands[0] = gen_lowpart (SImode, operands[0]);
11342 + if ( REG_P(operands[1]) ){
11343 + operands[3] = gen_highpart(SImode, operands[1]);
11344 + operands[1] = gen_lowpart(SImode, operands[1]);
11345 + } else if ( GET_CODE(operands[1]) == CONST_DOUBLE ){
11346 + operands[3] = GEN_INT(CONST_DOUBLE_LOW(operands[1]));
11347 + operands[1] = GEN_INT(CONST_DOUBLE_HIGH(operands[1]));
11348 + } else if ( GET_CODE(operands[1]) == CONST_INT ){
11349 + operands[3] = GEN_INT((INTVAL(operands[1]) < 0) ? -1 : 0);
11350 + operands[1] = operands[1];
11351 + } else {
11352 + internal_error("Illegal operand[1] for movdi split!");
11353 + }
11354 + }
11355 +
11356 + [(set_attr "length" "4,6,8,8,4,4")
11357 + (set_attr "type" "alu2,alu2,alu2,alu2,load2,store2")])
11358 +
11359 +
11360 +;;== 128 bits ==================================================
11361 +(define_expand "movti"
11362 + [(set (match_operand:TI 0 "nonimmediate_operand" "")
11363 + (match_operand:TI 1 "general_operand" ""))]
11364 + ""
11365 + {
11366 +
11367 + /* One of the ops has to be in a register. */
11368 + if (GET_CODE (operands[0]) != REG)
11369 + operands[1] = force_reg (TImode, operands[1]);
11370 +
11371 + /* We must fix any pre_dec for loads and post_inc stores */
11372 + if ( GET_CODE (operands[0]) == MEM
11373 + && GET_CODE (XEXP(operands[0],0)) == POST_INC ){
11374 + emit_move_insn(gen_rtx_MEM(TImode, XEXP(XEXP(operands[0],0),0)), operands[1]);
11375 + emit_insn(gen_addsi3(XEXP(XEXP(operands[0],0),0), XEXP(XEXP(operands[0],0),0), GEN_INT(GET_MODE_SIZE(TImode))));
11376 + DONE;
11377 + }
11378 +
11379 + if ( GET_CODE (operands[1]) == MEM
11380 + && GET_CODE (XEXP(operands[1],0)) == PRE_DEC ){
11381 + emit_insn(gen_addsi3(XEXP(XEXP(operands[1],0),0), XEXP(XEXP(operands[1],0),0), GEN_INT(-GET_MODE_SIZE(TImode))));
11382 + emit_move_insn(operands[0], gen_rtx_MEM(TImode, XEXP(XEXP(operands[1],0),0)));
11383 + DONE;
11384 + }
11385 +
11386 + if (GET_CODE (operands[1]) == CONST_INT){
11387 + unsigned int sign_extend = (INTVAL(operands[1]) < 0) ? 0xFFFFFFFF : 0;
11388 + emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 12), operands[1]);
11389 + emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 8), GEN_INT(sign_extend));
11390 + emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 4), GEN_INT(sign_extend));
11391 + emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 0), GEN_INT(sign_extend));
11392 + DONE;
11393 + }
11394 +
11395 + if (GET_CODE (operands[0]) == REG
11396 + && GET_CODE (operands[1]) == REG){
11397 + emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 12), gen_rtx_SUBREG(SImode, operands[1], 12));
11398 + emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 8), gen_rtx_SUBREG(SImode, operands[1], 8));
11399 + emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 4), gen_rtx_SUBREG(SImode, operands[1], 4));
11400 + emit_move_insn(gen_rtx_SUBREG(SImode, operands[0], 0), gen_rtx_SUBREG(SImode, operands[1], 0));
11401 + DONE;
11402 + }
11403 + })
11404 +
11405 +
11406 +(define_insn "*movti_internal"
11407 + [(set (match_operand:TI 0 "nonimmediate_operand" "=r,r, <RKu00,r")
11408 + (match_operand:TI 1 "loadti_operand" " r,RKu00>,r,m"))]
11409 + ""
11410 + "@
11411 + mov\t%T0, %T1\;mov\t%U0, %U1\;mov\t%L0, %L1\;mov\t%B0, %B1
11412 + ldm\t%p1, %0
11413 + stm\t%p0, %1
11414 + ldm\t%p1, %0"
11415 + [(set_attr "length" "8,4,4,4")
11416 + (set_attr "type" "alu,load4,store4,load4")])
11417 +
11418 +
11419 +;;== float - 32 bits ==========================================================
11420 +(define_expand "movsf"
11421 + [(set (match_operand:SF 0 "nonimmediate_operand" "")
11422 + (match_operand:SF 1 "general_operand" ""))]
11423 + ""
11424 + {
11425 +
11426 +
11427 + /* One of the ops has to be in a register. */
11428 + if (GET_CODE (operands[0]) != REG)
11429 + operands[1] = force_reg (SFmode, operands[1]);
11430 +
11431 + })
11432 +
11433 +(define_insn "*movsf_internal"
11434 + [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,r,m")
11435 + (match_operand:SF 1 "general_operand" "r,G,m,r"))]
11436 + "TARGET_SOFT_FLOAT"
11437 + {
11438 + switch (which_alternative) {
11439 + case 0:
11440 + case 1: return "mov\t%0, %1";
11441 + case 2:
11442 + if ( (REG_P(XEXP(operands[1], 0))
11443 + && REGNO(XEXP(operands[1], 0)) == SP_REGNUM)
11444 + || (GET_CODE(XEXP(operands[1], 0)) == PLUS
11445 + && REGNO(XEXP(XEXP(operands[1], 0), 0)) == SP_REGNUM
11446 + && GET_CODE(XEXP(XEXP(operands[1], 0), 1)) == CONST_INT
11447 + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) % 4 == 0
11448 + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) <= 0x1FC) )
11449 + return "lddsp\t%0, %1";
11450 + else if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])) )
11451 + return "lddpc\t%0, %1";
11452 + else
11453 + return "ld.w\t%0, %1";
11454 + case 3:
11455 + if ( (REG_P(XEXP(operands[0], 0))
11456 + && REGNO(XEXP(operands[0], 0)) == SP_REGNUM)
11457 + || (GET_CODE(XEXP(operands[0], 0)) == PLUS
11458 + && REGNO(XEXP(XEXP(operands[0], 0), 0)) == SP_REGNUM
11459 + && GET_CODE(XEXP(XEXP(operands[0], 0), 1)) == CONST_INT
11460 + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) % 4 == 0
11461 + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) <= 0x1FC) )
11462 + return "stdsp\t%0, %1";
11463 + else
11464 + return "st.w\t%0, %1";
11465 + default:
11466 + abort();
11467 + }
11468 + }
11469 +
11470 + [(set_attr "length" "2,4,4,4")
11471 + (set_attr "type" "alu,alu,load,store")])
11472 +
11473 +
11474 +
11475 +;;== double - 64 bits =========================================================
11476 +(define_expand "movdf"
11477 + [(set (match_operand:DF 0 "nonimmediate_operand" "")
11478 + (match_operand:DF 1 "general_operand" ""))]
11479 + ""
11480 + {
11481 + /* One of the ops has to be in a register. */
11482 + if (GET_CODE (operands[0]) != REG){
11483 + operands[1] = force_reg (DFmode, operands[1]);
11484 + }
11485 + })
11486 +
11487 +
11488 +(define_insn_and_split "*movdf_internal"
11489 + [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,r,m")
11490 + (match_operand:DF 1 "general_operand" "r,G,m,r"))]
11491 + "TARGET_SOFT_FLOAT"
11492 + {
11493 + switch (which_alternative ){
11494 + case 0:
11495 + case 1:
11496 + return "mov\t%0, %1\;mov\t%m0, %m1";
11497 + case 2:
11498 + if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])))
11499 + return "ld.d\t%0, pc[%1 - .]";
11500 + else
11501 + return "ld.d\t%0, %1";
11502 + case 3:
11503 + return "st.d\t%0, %1";
11504 + default:
11505 + abort();
11506 + }
11507 + }
11508 + "TARGET_SOFT_FLOAT
11509 + && reload_completed
11510 + && (REG_P(operands[0]) && REG_P(operands[1]))"
11511 + [(set (match_dup 0) (match_dup 1))
11512 + (set (match_dup 2) (match_dup 3))]
11513 + "
11514 + {
11515 + operands[2] = gen_highpart (SImode, operands[0]);
11516 + operands[0] = gen_lowpart (SImode, operands[0]);
11517 + operands[3] = gen_highpart(SImode, operands[1]);
11518 + operands[1] = gen_lowpart(SImode, operands[1]);
11519 + }
11520 + "
11521 +
11522 + [(set_attr "length" "4,8,4,4")
11523 + (set_attr "type" "alu2,alu2,load2,store2")])
11524 +
11525 +
11526 +
11527 +
11528 +;;=============================================================================
11529 +;; Move chunks of memory
11530 +;;=============================================================================
11531 +
11532 +(define_expand "movmemsi"
11533 + [(match_operand:BLK 0 "general_operand" "")
11534 + (match_operand:BLK 1 "general_operand" "")
11535 + (match_operand:SI 2 "const_int_operand" "")
11536 + (match_operand:SI 3 "const_int_operand" "")]
11537 + ""
11538 + "
11539 + if (avr32_gen_movmemsi (operands))
11540 + DONE;
11541 + FAIL;
11542 + "
11543 + )
11544 +
11545 +
11546 +
11547 +
11548 +;;=============================================================================
11549 +;; Bit field instructions
11550 +;;-----------------------------------------------------------------------------
11551 +;; Instructions to insert or extract bit-fields
11552 +;;=============================================================================
11553 +
11554 +(define_insn "insv"
11555 + [ (set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r")
11556 + (match_operand:SI 1 "immediate_operand" "Ku05")
11557 + (match_operand:SI 2 "immediate_operand" "Ku05"))
11558 + (match_operand 3 "register_operand" "r"))]
11559 + ""
11560 + "bfins\t%0, %3, %2, %1"
11561 + [(set_attr "type" "alu")
11562 + (set_attr "length" "4")
11563 + (set_attr "cc" "set_ncz")])
11564 +
11565 +
11566 +
11567 +
11568 +(define_insn "extv"
11569 + [ (set (match_operand:SI 0 "register_operand" "=r")
11570 + (sign_extract:SI (match_operand:SI 1 "register_operand" "r")
11571 + (match_operand:SI 2 "immediate_operand" "Ku05")
11572 + (match_operand:SI 3 "immediate_operand" "Ku05")))]
11573 + ""
11574 + "bfexts\t%0, %1, %3, %2"
11575 + [(set_attr "type" "alu")
11576 + (set_attr "length" "4")
11577 + (set_attr "cc" "set_ncz")])
11578 +
11579 +
11580 +(define_insn "extzv"
11581 + [ (set (match_operand:SI 0 "register_operand" "=r")
11582 + (zero_extract:SI (match_operand:SI 1 "register_operand" "r")
11583 + (match_operand:SI 2 "immediate_operand" "Ku05")
11584 + (match_operand:SI 3 "immediate_operand" "Ku05")))]
11585 + ""
11586 + "bfextu\t%0, %1, %3, %2"
11587 + [(set_attr "type" "alu")
11588 + (set_attr "length" "4")
11589 + (set_attr "cc" "set_ncz")])
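;; Illustrative example (hypothetical C source): reading and writing C
;; bit-fields is the typical way these patterns are matched, e.g.
;;
;;   struct flags { unsigned int mode : 5; signed int offset : 5; };
;;   void set_mode (struct flags *f, unsigned int m) { f->mode = m; }
;;   int get_offset (const struct flags *f) { return f->offset; }
;;
;; where the store side would normally use bfins and the signed/unsigned
;; extractions bfexts/bfextu, once the containing word is in a register.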
11590 +
11591 +
11592 +
11593 +;;=============================================================================
11594 +;; Some peepholes for avoiding unnecessary cast instructions
11595 +;; followed by bfins.
11596 +;;-----------------------------------------------------------------------------
11597 +
11598 +(define_peephole2
11599 + [(set (match_operand:SI 0 "register_operand" "")
11600 + (zero_extend:SI (match_operand:QI 1 "register_operand" "")))
11601 + (set (zero_extract:SI (match_operand 2 "register_operand" "")
11602 + (match_operand:SI 3 "immediate_operand" "")
11603 + (match_operand:SI 4 "immediate_operand" ""))
11604 + (match_dup 0))]
11605 + "((peep2_reg_dead_p(2, operands[0]) &&
11606 + (INTVAL(operands[3]) <= 8)))"
11607 + [(set (zero_extract:SI (match_dup 2)
11608 + (match_dup 3)
11609 + (match_dup 4))
11610 + (match_dup 1))]
11611 + )
11612 +
11613 +(define_peephole2
11614 + [(set (match_operand:SI 0 "register_operand" "")
11615 + (zero_extend:SI (match_operand:HI 1 "register_operand" "")))
11616 + (set (zero_extract:SI (match_operand 2 "register_operand" "")
11617 + (match_operand:SI 3 "immediate_operand" "")
11618 + (match_operand:SI 4 "immediate_operand" ""))
11619 + (match_dup 0))]
11620 + "((peep2_reg_dead_p(2, operands[0]) &&
11621 + (INTVAL(operands[3]) <= 16)))"
11622 + [(set (zero_extract:SI (match_dup 2)
11623 + (match_dup 3)
11624 + (match_dup 4))
11625 + (match_dup 1))]
11626 + )
11627 +
11628 +;;=============================================================================
11629 +;; push/pop multiple registers
11630 +;;-----------------------------------------------------------------------------
11631 +;; Implements the pushm, stm and popm instructions
11632 +;;=============================================================================
11633 +(define_insn "pushm"
11634 + [(set (mem:BLK (pre_dec:BLK (reg:SI SP_REGNUM)))
11635 + (unspec:BLK [(match_operand 0 "const_int_operand" "")]
11636 + UNSPEC_PUSHM))]
11637 + ""
11638 + {
11639 + if (INTVAL(operands[0])) {
11640 + return "pushm\t%r0";
11641 + } else {
11642 + return "";
11643 + }
11644 + }
11645 + [(set_attr "type" "store")
11646 + (set_attr "length" "2")
11647 + (set_attr "cc" "none")])
11648 +
11649 +(define_insn "stm"
11650 + [(unspec [(match_operand 0 "register_operand" "r")
11651 + (match_operand 1 "const_int_operand" "")
11652 + (match_operand 2 "const_int_operand" "")]
11653 + UNSPEC_STM)]
11654 + ""
11655 + {
11656 + if (INTVAL(operands[1])) {
11657 + if (INTVAL(operands[2]) != 0)
11658 + return "stm\t--%0, %s1";
11659 + else
11660 + return "stm\t%0, %s1";
11661 + } else {
11662 + return "";
11663 + }
11664 + }
11665 + [(set_attr "type" "store")
11666 + (set_attr "length" "4")
11667 + (set_attr "cc" "none")])
11668 +
11669 +
11670 +
11671 +(define_insn "popm"
11672 + [(unspec [(match_operand 0 "const_int_operand" "")]
11673 + UNSPEC_POPM)]
11674 + ""
11675 + {
11676 + if (INTVAL(operands[0])) {
11677 + return "popm %r0";
11678 + } else {
11679 + return "";
11680 + }
11681 + }
11682 + [(set_attr "type" "load")
11683 + (set_attr "length" "2")])
11684 +
11685 +
11686 +
11687 +;;=============================================================================
11688 +;; add
11689 +;;-----------------------------------------------------------------------------
11690 +;; Adds reg1 and reg2 and puts the result in reg0.
11691 +;;=============================================================================
11692 +(define_insn "add<mode>3"
11693 + [(set (match_operand:INTM 0 "register_operand" "=r,r,r,r,r")
11694 + (plus:INTM (match_operand:INTM 1 "register_operand" "%0,r,0,r,0")
11695 + (match_operand:INTM 2 "avr32_add_operand" "r,r,Is08,Is16,Is21")))]
11696 + ""
11697 + "@
11698 + add %0, %2
11699 + add %0, %1, %2
11700 + sub %0, %n2
11701 + sub %0, %1, %n2
11702 + sub %0, %n2"
11703 +
11704 + [(set_attr "length" "2,4,2,4,4")
11705 + (set_attr "cc" "<INTM:alu_cc_attr>")])
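;; Illustrative example (hypothetical C source): a plain register add such as
;;
;;   int add_int (int a, int b) { return a + b; }
;;
;; maps onto the register alternatives above, while adding a small constant
;; (e.g. a + 100) is emitted through the "sub %0, %n2" alternatives with the
;; negated immediate, as the templates show.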
11706 +
11707 +(define_insn "*addsi3_lsl"
11708 + [(set (match_operand:SI 0 "register_operand" "=r")
11709 + (plus:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
11710 + (match_operand:SI 3 "avr32_add_shift_immediate_operand" "Ku02"))
11711 + (match_operand:SI 2 "register_operand" "r")))]
11712 + ""
11713 + "add %0, %2, %1 << %3"
11714 + [(set_attr "length" "4")
11715 + (set_attr "cc" "set_vncz")])
11716 +
11717 +
11718 +(define_insn "*addsi3_mul"
11719 + [(set (match_operand:SI 0 "register_operand" "=r")
11720 + (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
11721 + (match_operand:SI 3 "immediate_operand" "Ku04" ))
11722 + (match_operand:SI 2 "register_operand" "r")))]
11723 + "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
11724 + (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
11725 + "add %0, %2, %1 << %p3"
11726 + [(set_attr "length" "4")
11727 + (set_attr "cc" "set_vncz")])
11728 +
11729 +
11730 +(define_peephole2
11731 + [(set (match_operand:SI 0 "register_operand" "")
11732 + (ashift:SI (match_operand:SI 1 "register_operand" "")
11733 + (match_operand:SI 2 "immediate_operand" "")))
11734 + (set (match_operand:SI 3 "register_operand" "")
11735 + (plus:SI (match_dup 0)
11736 + (match_operand:SI 4 "register_operand" "")))]
11737 + "(peep2_reg_dead_p(2, operands[0]) &&
11738 + (INTVAL(operands[2]) < 4 && INTVAL(operands[2]) > 0))"
11739 + [(set (match_dup 3)
11740 + (plus:SI (ashift:SI (match_dup 1)
11741 + (match_dup 2))
11742 + (match_dup 4)))]
11743 + )
11744 +
11745 +(define_peephole2
11746 + [(set (match_operand:SI 0 "register_operand" "")
11747 + (ashift:SI (match_operand:SI 1 "register_operand" "")
11748 + (match_operand:SI 2 "immediate_operand" "")))
11749 + (set (match_operand:SI 3 "register_operand" "")
11750 + (plus:SI (match_operand:SI 4 "register_operand" "")
11751 + (match_dup 0)))]
11752 + "(peep2_reg_dead_p(2, operands[0]) &&
11753 + (INTVAL(operands[2]) < 4 && INTVAL(operands[2]) > 0))"
11754 + [(set (match_dup 3)
11755 + (plus:SI (ashift:SI (match_dup 1)
11756 + (match_dup 2))
11757 + (match_dup 4)))]
11758 + )
11759 +
11760 +(define_insn "adddi3"
11761 + [(set (match_operand:DI 0 "register_operand" "=r,r")
11762 + (plus:DI (match_operand:DI 1 "register_operand" "%r,0")
11763 + (match_operand:DI 2 "register_operand" "r,r")))]
11764 + ""
11765 + "@
11766 + add %0, %1, %2\;adc %m0, %m1, %m2
11767 + add %0, %2\;adc %m0, %m0, %m2"
11768 + [(set_attr "length" "8,6")
11769 + (set_attr "type" "alu2")
11770 + (set_attr "cc" "set_vncz")])
11771 +
11772 +
11773 +
11774 +;;=============================================================================
11775 +;; subtract
11776 +;;-----------------------------------------------------------------------------
11777 +;; Subtracts reg2 or an immediate value from reg1 and puts the result in reg0.
11778 +;;=============================================================================
11779 +
11780 +(define_peephole2
11781 + [(set (match_operand:QI 0 "register_operand" "")
11782 + (minus:QI (match_operand:QI 1 "general_operand" "")
11783 + (match_operand:QI 2 "general_operand" "")))
11784 + (set (match_operand:QI 3 "register_operand" "")
11785 + (match_dup 0))]
11786 + "peep2_reg_dead_p(2, operands[0])"
11787 + [(set (match_dup 3)
11788 + (minus:QI (match_dup 1) (match_dup 2)))]
11789 + )
11790 +
11791 +(define_peephole
11792 + [(set (match_operand:QI 0 "register_operand" "")
11793 + (minus:QI (match_operand:QI 1 "immediate_operand" "Ks08")
11794 + (match_operand:QI 2 "register_operand" "r")))
11795 + (set (match_operand:QI 3 "register_operand" "r")
11796 + (match_dup 0))]
11797 + "dead_or_set_p(insn, operands[0])"
11798 + "rsub %3, %2, %1"
11799 + [(set_attr "length" "4")
11800 + (set_attr "cc" "clobber")]
11801 + )
11802 +
11803 +
11804 +
11805 +(define_insn "sub<mode>3"
11806 + [(set (match_operand:INTM 0 "general_operand" "=r,r,r,r,r,r,r")
11807 + (minus:INTM (match_operand:INTM 1 "nonmemory_operand" "0,r,0,r,0,r,Ks08")
11808 + (match_operand:INTM 2 "nonmemory_operand" "r,r,Ks08,Ks16,Ks21,0,r")))]
11809 + ""
11810 + "@
11811 + sub %0, %2
11812 + sub %0, %1, %2
11813 + sub %0, %2
11814 + sub %0, %1, %2
11815 + sub %0, %2
11816 + rsub %0, %1
11817 + rsub %0, %2, %1"
11818 + [(set_attr "length" "2,4,2,4,4,2,4")
11819 + (set_attr "cc" "<INTM:alu_cc_attr>")])
11820 +
11821 +(define_insn "*sub<mode>3_mul"
11822 + [(set (match_operand:INTM 0 "register_operand" "=r,r,r")
11823 + (minus:INTM (match_operand:INTM 1 "register_operand" "r,0,r")
11824 + (mult:INTM (match_operand:INTM 2 "register_operand" "r,r,0")
11825 + (match_operand:SI 3 "immediate_operand" "Ku04,Ku04,Ku04" ))))]
11826 + "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
11827 + (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
11828 + "@
11829 + sub %0, %1, %2 << %p3
11830 + sub %0, %0, %2 << %p3
11831 + sub %0, %1, %0 << %p3"
11832 + [(set_attr "length" "4,4,4")
11833 + (set_attr "cc" "<INTM:alu_cc_attr>")])
11834 +
11835 +(define_insn "*sub<mode>3_lsl"
11836 + [(set (match_operand:INTM 0 "register_operand" "=r")
11837 + (minus:INTM (ashift:INTM (match_operand:INTM 1 "register_operand" "r")
11838 + (match_operand:SI 3 "avr32_add_shift_immediate_operand" "Ku02"))
11839 + (match_operand:INTM 2 "register_operand" "r")))]
11840 + ""
11841 + "sub %0, %2, %1 << %3"
11842 + [(set_attr "length" "4")
11843 + (set_attr "cc" "<INTM:alu_cc_attr>")])
11844 +
11845 +
11846 +(define_insn "subdi3"
11847 + [(set (match_operand:DI 0 "register_operand" "=r,r")
11848 + (minus:DI (match_operand:DI 1 "register_operand" "%r,0")
11849 + (match_operand:DI 2 "register_operand" "r,r")))]
11850 + ""
11851 + "@
11852 + sub %0, %1, %2\;sbc %m0, %m1, %m2
11853 + sub %0, %2\;sbc %m0, %m0, %m2"
11854 + [(set_attr "length" "8,6")
11855 + (set_attr "type" "alu2")
11856 + (set_attr "cc" "set_vncz")])
11857 +
11858 +
11859 +
11860 +;;=============================================================================
11861 +;; multiply
11862 +;;-----------------------------------------------------------------------------
11863 +;; Multiply op1 and op2 and put the value in op0.
11864 +;;=============================================================================
11865 +
11866 +
11867 +(define_insn "mulqi3"
11868 + [(set (match_operand:QI 0 "register_operand" "=r,r,r")
11869 + (mult:QI (match_operand:QI 1 "register_operand" "%0,r,r")
11870 + (match_operand:QI 2 "avr32_mul_operand" "r,r,Ks08")))]
11871 + ""
11872 + {
11873 + switch (which_alternative){
11874 + case 0:
11875 + return "mul %0, %2";
11876 + case 1:
11877 + return "mul %0, %1, %2";
11878 + case 2:
11879 + return "mul %0, %1, %2";
11880 + default:
11881 + abort();
11882 + }
11883 + }
11884 + [(set_attr "type" "mulww_w,mulww_w,mulwh")
11885 + (set_attr "length" "2,4,4")
11886 + (set_attr "cc" "none")])
11887 +
11888 +(define_insn "mulsi3"
11889 + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
11890 + (mult:SI (match_operand:SI 1 "register_operand" "%0,r,r")
11891 + (match_operand:SI 2 "avr32_mul_operand" "r,r,Ks08")))]
11892 + ""
11893 + {
11894 + switch (which_alternative){
11895 + case 0:
11896 + return "mul %0, %2";
11897 + case 1:
11898 + return "mul %0, %1, %2";
11899 + case 2:
11900 + return "mul %0, %1, %2";
11901 + default:
11902 + abort();
11903 + }
11904 + }
11905 + [(set_attr "type" "mulww_w,mulww_w,mulwh")
11906 + (set_attr "length" "2,4,4")
11907 + (set_attr "cc" "none")])
11908 +
11909 +
11910 +(define_insn "mulhisi3"
11911 + [(set (match_operand:SI 0 "register_operand" "=r")
11912 + (mult:SI
11913 + (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
11914 + (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
11915 + "TARGET_DSP"
11916 + "mulhh.w %0, %1:b, %2:b"
11917 + [(set_attr "type" "mulhh")
11918 + (set_attr "length" "4")
11919 + (set_attr "cc" "none")])
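;; Illustrative example (hypothetical C source): when TARGET_DSP holds, a
;; widening 16x16 multiply such as
;;
;;   int mul16 (short a, short b) { return a * b; }
;;
;; can be matched by this pattern and emitted as a single mulhh.w.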
11920 +
11921 +(define_peephole2
11922 + [(match_scratch:DI 6 "r")
11923 + (set (match_operand:SI 0 "register_operand" "")
11924 + (mult:SI
11925 + (sign_extend:SI (match_operand:HI 1 "register_operand" ""))
11926 + (sign_extend:SI (match_operand:HI 2 "register_operand" ""))))
11927 + (set (match_operand:SI 3 "register_operand" "")
11928 + (ashiftrt:SI (match_dup 0)
11929 + (const_int 16)))]
11930 + "TARGET_DSP
11931 + && (peep2_reg_dead_p(1, operands[0]) || (REGNO(operands[0]) == REGNO(operands[3])))"
11932 + [(set (match_dup 4) (sign_extend:SI (match_dup 1)))
11933 + (set (match_dup 6)
11934 + (ashift:DI (mult:DI (sign_extend:DI (match_dup 4))
11935 + (sign_extend:DI (match_dup 2)))
11936 + (const_int 16)))
11937 + (set (match_dup 3) (match_dup 5))]
11938 +
11939 + "{
11940 + operands[4] = gen_rtx_REG(SImode, REGNO(operands[1]));
11941 + operands[5] = gen_highpart (SImode, operands[4]);
11942 + }"
11943 + )
11944 +
11945 +(define_insn "mulnhisi3"
11946 + [(set (match_operand:SI 0 "register_operand" "=r")
11947 + (mult:SI
11948 + (sign_extend:SI (neg:HI (match_operand:HI 1 "register_operand" "r")))
11949 + (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
11950 + "TARGET_DSP"
11951 + "mulnhh.w %0, %1:b, %2:b"
11952 + [(set_attr "type" "mulhh")
11953 + (set_attr "length" "4")
11954 + (set_attr "cc" "none")])
11955 +
11956 +(define_insn "machisi3"
11957 + [(set (match_operand:SI 0 "register_operand" "+r")
11958 + (plus:SI (mult:SI
11959 + (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
11960 + (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
11961 + (match_dup 0)))]
11962 + "TARGET_DSP"
11963 + "machh.w %0, %1:b, %2:b"
11964 + [(set_attr "type" "machh_w")
11965 + (set_attr "length" "4")
11966 + (set_attr "cc" "none")])
11967 +
11968 +
11969 +
11970 +(define_insn "mulsidi3"
11971 + [(set (match_operand:DI 0 "register_operand" "=r")
11972 + (mult:DI
11973 + (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
11974 + (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
11975 + ""
11976 + "muls.d %0, %1, %2"
11977 + [(set_attr "type" "mulww_d")
11978 + (set_attr "length" "4")
11979 + (set_attr "cc" "none")])
11980 +
11981 +(define_insn "umulsidi3"
11982 + [(set (match_operand:DI 0 "register_operand" "=r")
11983 + (mult:DI
11984 + (zero_extend:DI (match_operand:SI 1 "register_operand" "%r"))
11985 + (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
11986 + ""
11987 + "mulu.d %0, %1, %2"
11988 + [(set_attr "type" "mulww_d")
11989 + (set_attr "length" "4")
11990 + (set_attr "cc" "none")])
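;; Illustrative example (hypothetical C source): full 32x32->64 multiplies,
;;
;;   long long wide_smul (int a, int b) { return (long long) a * b; }
;;   unsigned long long wide_umul (unsigned int a, unsigned int b)
;;     { return (unsigned long long) a * b; }
;;
;; would typically match mulsidi3/umulsidi3 and be emitted as a single
;; muls.d or mulu.d rather than a library call.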
11991 +
11992 +(define_insn "*mulaccsi3"
11993 + [(set (match_operand:SI 0 "register_operand" "+r")
11994 + (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "%r")
11995 + (match_operand:SI 2 "register_operand" "r"))
11996 + (match_dup 0)))]
11997 + ""
11998 + "mac %0, %1, %2"
11999 + [(set_attr "type" "macww_w")
12000 + (set_attr "length" "4")
12001 + (set_attr "cc" "none")])
12002 +
12003 +(define_insn "mulaccsidi3"
12004 + [(set (match_operand:DI 0 "register_operand" "+r")
12005 + (plus:DI (mult:DI
12006 + (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
12007 + (sign_extend:DI (match_operand:SI 2 "register_operand" "r")))
12008 + (match_dup 0)))]
12009 + ""
12010 + "macs.d %0, %1, %2"
12011 + [(set_attr "type" "macww_d")
12012 + (set_attr "length" "4")
12013 + (set_attr "cc" "none")])
12014 +
12015 +(define_insn "umulaccsidi3"
12016 + [(set (match_operand:DI 0 "register_operand" "+r")
12017 + (plus:DI (mult:DI
12018 + (zero_extend:DI (match_operand:SI 1 "register_operand" "%r"))
12019 + (zero_extend:DI (match_operand:SI 2 "register_operand" "r")))
12020 + (match_dup 0)))]
12021 + ""
12022 + "macu.d %0, %1, %2"
12023 + [(set_attr "type" "macww_d")
12024 + (set_attr "length" "4")
12025 + (set_attr "cc" "none")])
12026 +
12027 +
12028 +
12029 +;; Try to avoid Write-After-Write hazards for mul operations
12030 +;; if it can be done
12031 +(define_peephole2
12032 + [(set (match_operand:SI 0 "register_operand" "")
12033 + (mult:SI
12034 + (sign_extend:SI (match_operand 1 "general_operand" ""))
12035 + (sign_extend:SI (match_operand 2 "general_operand" ""))))
12036 + (set (match_dup 0)
12037 + (match_operator:SI 3 "alu_operator" [(match_dup 0)
12038 + (match_operand 4 "general_operand" "")]))]
12039 + "peep2_reg_dead_p(1, operands[2])"
12040 + [(set (match_dup 5)
12041 + (mult:SI
12042 + (sign_extend:SI (match_dup 1))
12043 + (sign_extend:SI (match_dup 2))))
12044 + (set (match_dup 0)
12045 + (match_op_dup 3 [(match_dup 5)
12046 + (match_dup 4)]))]
12047 + "{operands[5] = gen_rtx_REG(SImode, REGNO(operands[2]));}"
12048 + )
12049 +
12050 +
12051 +
12052 +;;=============================================================================
12053 +;; DSP instructions
12054 +;;=============================================================================
12055 +(define_insn "mulsathh_h"
12056 + [(set (match_operand:HI 0 "register_operand" "=r")
12057 + (ss_truncate:HI (ashiftrt:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
12058 + (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
12059 + (const_int 15))))]
12060 + "TARGET_DSP"
12061 + "mulsathh.h\t%0, %1:b, %2:b"
12062 + [(set_attr "length" "4")
12063 + (set_attr "cc" "none")
12064 + (set_attr "type" "mulhh")])
12065 +
12066 +(define_insn "mulsatrndhh_h"
12067 + [(set (match_operand:HI 0 "register_operand" "=r")
12068 + (ss_truncate:HI (ashiftrt:SI
12069 + (plus:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
12070 + (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
12071 + (const_int 1073741824))
12072 + (const_int 15))))]
12073 + "TARGET_DSP"
12074 + "mulsatrndhh.h\t%0, %1:b, %2:b"
12075 + [(set_attr "length" "4")
12076 + (set_attr "cc" "none")
12077 + (set_attr "type" "mulhh")])
12078 +
12079 +(define_insn "mulsathh_w"
12080 + [(set (match_operand:SI 0 "register_operand" "=r")
12081 + (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
12082 + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
12083 + (const_int 1))))]
12084 + "TARGET_DSP"
12085 + "mulsathh.w\t%0, %1:b, %2:b"
12086 + [(set_attr "length" "4")
12087 + (set_attr "cc" "none")
12088 + (set_attr "type" "mulhh")])
12089 +
12090 +(define_insn "mulsatwh_w"
12091 + [(set (match_operand:SI 0 "register_operand" "=r")
12092 + (ss_truncate:SI (ashiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
12093 + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
12094 + (const_int 15))))]
12095 + "TARGET_DSP"
12096 + "mulsatwh.w\t%0, %1, %2:b"
12097 + [(set_attr "length" "4")
12098 + (set_attr "cc" "none")
12099 + (set_attr "type" "mulwh")])
12100 +
12101 +(define_insn "mulsatrndwh_w"
12102 + [(set (match_operand:SI 0 "register_operand" "=r")
12103 + (ss_truncate:SI (ashiftrt:DI (plus:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
12104 + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
12105 + (const_int 1073741824))
12106 + (const_int 15))))]
12107 + "TARGET_DSP"
12108 + "mulsatrndwh.w\t%0, %1, %2:b"
12109 + [(set_attr "length" "4")
12110 + (set_attr "cc" "none")
12111 + (set_attr "type" "mulwh")])
12112 +
12113 +(define_insn "macsathh_w"
12114 + [(set (match_operand:SI 0 "register_operand" "+r")
12115 + (plus:SI (match_dup 0)
12116 + (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
12117 + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
12118 + (const_int 1)))))]
12119 + "TARGET_DSP"
12120 + "macsathh.w\t%0, %1:b, %2:b"
12121 + [(set_attr "length" "4")
12122 + (set_attr "cc" "none")
12123 + (set_attr "type" "mulhh")])
12124 +
12125 +
12126 +(define_insn "mulwh_d"
12127 + [(set (match_operand:DI 0 "register_operand" "=r")
12128 + (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
12129 + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
12130 + (const_int 16)))]
12131 + "TARGET_DSP"
12132 + "mulwh.d\t%0, %1, %2:b"
12133 + [(set_attr "length" "4")
12134 + (set_attr "cc" "none")
12135 + (set_attr "type" "mulwh")])
12136 +
12137 +
12138 +(define_insn "mulnwh_d"
12139 + [(set (match_operand:DI 0 "register_operand" "=r")
12140 + (ashift:DI (mult:DI (not:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")))
12141 + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
12142 + (const_int 16)))]
12143 + "TARGET_DSP"
12144 + "mulnwh.d\t%0, %1, %2:b"
12145 + [(set_attr "length" "4")
12146 + (set_attr "cc" "none")
12147 + (set_attr "type" "mulwh")])
12148 +
12149 +(define_insn "macwh_d"
12150 + [(set (match_operand:DI 0 "register_operand" "+r")
12151 + (plus:DI (match_dup 0)
12152 + (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
12153 + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
12154 + (const_int 16))))]
12155 + "TARGET_DSP"
12156 + "macwh.d\t%0, %1, %2:b"
12157 + [(set_attr "length" "4")
12158 + (set_attr "cc" "none")
12159 + (set_attr "type" "mulwh")])
12160 +
12161 +(define_insn "machh_d"
12162 + [(set (match_operand:DI 0 "register_operand" "+r")
12163 + (plus:DI (match_dup 0)
12164 + (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
12165 + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))))]
12166 + "TARGET_DSP"
12167 + "machh.d\t%0, %1:b, %2:b"
12168 + [(set_attr "length" "4")
12169 + (set_attr "cc" "none")
12170 + (set_attr "type" "mulwh")])
12171 +
12172 +(define_insn "satadd_w"
12173 + [(set (match_operand:SI 0 "register_operand" "=r")
12174 + (ss_plus:SI (match_operand:SI 1 "register_operand" "r")
12175 + (match_operand:SI 2 "register_operand" "r")))]
12176 + "TARGET_DSP"
12177 + "satadd.w\t%0, %1, %2"
12178 + [(set_attr "length" "4")
12179 + (set_attr "cc" "none")
12180 + (set_attr "type" "alu_sat")])
12181 +
12182 +(define_insn "satsub_w"
12183 + [(set (match_operand:SI 0 "register_operand" "=r")
12184 + (ss_minus:SI (match_operand:SI 1 "register_operand" "r")
12185 + (match_operand:SI 2 "register_operand" "r")))]
12186 + "TARGET_DSP"
12187 + "satsub.w\t%0, %1, %2"
12188 + [(set_attr "length" "4")
12189 + (set_attr "cc" "none")
12190 + (set_attr "type" "alu_sat")])
12191 +
12192 +(define_insn "satadd_h"
12193 + [(set (match_operand:HI 0 "register_operand" "=r")
12194 + (ss_plus:HI (match_operand:HI 1 "register_operand" "r")
12195 + (match_operand:HI 2 "register_operand" "r")))]
12196 + "TARGET_DSP"
12197 + "satadd.h\t%0, %1, %2"
12198 + [(set_attr "length" "4")
12199 + (set_attr "cc" "none")
12200 + (set_attr "type" "alu_sat")])
12201 +
12202 +(define_insn "satsub_h"
12203 + [(set (match_operand:HI 0 "register_operand" "=r")
12204 + (ss_minus:HI (match_operand:HI 1 "register_operand" "r")
12205 + (match_operand:HI 2 "register_operand" "r")))]
12206 + "TARGET_DSP"
12207 + "satsub.h\t%0, %1, %2"
12208 + [(set_attr "length" "4")
12209 + (set_attr "cc" "none")
12210 + (set_attr "type" "alu_sat")])
12211 +
12212 +
12213 +;;=============================================================================
12214 +;; smin
12215 +;;-----------------------------------------------------------------------------
12216 +;; Set reg0 to the smaller of reg1 and reg2, treating the register values as
12217 +;; signed.
12218 +;;=============================================================================
12219 +(define_insn "sminsi3"
12220 + [(set (match_operand:SI 0 "register_operand" "=r")
12221 + (smin:SI (match_operand:SI 1 "register_operand" "r")
12222 + (match_operand:SI 2 "register_operand" "r")))]
12223 + ""
12224 + "min %0, %1, %2"
12225 + [(set_attr "length" "4")
12226 + (set_attr "cc" "none")])
12227 +
12228 +;;=============================================================================
12229 +;; smax
12230 +;;-----------------------------------------------------------------------------
12231 +;; Set reg0 to the larger of reg1 and reg2, treating the register values as
12232 +;; signed.
12233 +;;=============================================================================
12234 +(define_insn "smaxsi3"
12235 + [(set (match_operand:SI 0 "register_operand" "=r")
12236 + (smax:SI (match_operand:SI 1 "register_operand" "r")
12237 + (match_operand:SI 2 "register_operand" "r")))]
12238 + ""
12239 + "max %0, %1, %2"
12240 + [(set_attr "length" "4")
12241 + (set_attr "cc" "none")])
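;; Illustrative example (hypothetical C source): signed min/max selections,
;;
;;   int imin (int a, int b) { return a < b ? a : b; }
;;   int imax (int a, int b) { return a > b ? a : b; }
;;
;; are the usual way sminsi3/smaxsi3 are matched, giving a single min or max
;; instruction instead of a compare-and-branch sequence.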
12242 +
12243 +
12244 +;;=============================================================================
12245 +;; Logical operations
12246 +;;-----------------------------------------------------------------------------
12247 +
12248 +;; Split up simple DImode logical operations. Simply perform the logical
12249 +;; operation on the upper and lower halves of the registers.
12250 +(define_split
12251 + [(set (match_operand:DI 0 "register_operand" "")
12252 + (match_operator:DI 6 "logical_binary_operator"
12253 + [(match_operand:DI 1 "register_operand" "")
12254 + (match_operand:DI 2 "register_operand" "")]))]
12255 + "reload_completed"
12256 + [(set (match_dup 0) (match_op_dup:SI 6 [(match_dup 1) (match_dup 2)]))
12257 + (set (match_dup 3) (match_op_dup:SI 6 [(match_dup 4) (match_dup 5)]))]
12258 + "
12259 + {
12260 + operands[3] = gen_highpart (SImode, operands[0]);
12261 + operands[0] = gen_lowpart (SImode, operands[0]);
12262 + operands[4] = gen_highpart (SImode, operands[1]);
12263 + operands[1] = gen_lowpart (SImode, operands[1]);
12264 + operands[5] = gen_highpart (SImode, operands[2]);
12265 + operands[2] = gen_lowpart (SImode, operands[2]);
12266 + }"
12267 +)
12268 +
12269 +;;=============================================================================
12270 +;; Logical operations with shifted operand
12271 +;;=============================================================================
12272 +(define_insn "<code>si_lshift"
12273 + [(set (match_operand:SI 0 "register_operand" "=r")
12274 + (logical:SI (match_operator:SI 4 "logical_shift_operator"
12275 + [(match_operand:SI 2 "register_operand" "r")
12276 + (match_operand:SI 3 "immediate_operand" "Ku05")])
12277 + (match_operand:SI 1 "register_operand" "r")))]
12278 + ""
12279 + {
12280 + if ( GET_CODE(operands[4]) == ASHIFT )
12281 + return "<logical_insn>\t%0, %1, %2 << %3";
12282 + else
12283 + return "<logical_insn>\t%0, %1, %2 >> %3";
12284 + }
12285 +
12286 + [(set_attr "cc" "set_z")]
12287 +)
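;; Illustrative example (hypothetical C source): a logical operation whose
;; second operand is a constant shift, e.g.
;;
;;   int masked_or (int a, int b) { return a | (b << 3); }
;;
;; can be matched here and emitted as one instruction with a shifted operand,
;; in the "<logical_insn> %0, %1, %2 << %3" form shown above.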
12288 +
12289 +
12290 +;;************************************************
12291 +;; Peepholes for detecting logical operations
12292 +;; with shifted operands
12293 +;;************************************************
12294 +
12295 +(define_peephole
12296 + [(set (match_operand:SI 3 "register_operand" "")
12297 + (match_operator:SI 5 "logical_shift_operator"
12298 + [(match_operand:SI 1 "register_operand" "")
12299 + (match_operand:SI 2 "immediate_operand" "")]))
12300 + (set (match_operand:SI 0 "register_operand" "")
12301 + (logical:SI (match_operand:SI 4 "register_operand" "")
12302 + (match_dup 3)))]
12303 + "(dead_or_set_p(insn, operands[3])) || (REGNO(operands[3]) == REGNO(operands[0]))"
12304 + {
12305 + if ( GET_CODE(operands[5]) == ASHIFT )
12306 + return "<logical_insn>\t%0, %4, %1 << %2";
12307 + else
12308 + return "<logical_insn>\t%0, %4, %1 >> %2";
12309 + }
12310 + [(set_attr "cc" "set_z")]
12311 + )
12312 +
12313 +(define_peephole
12314 + [(set (match_operand:SI 3 "register_operand" "")
12315 + (match_operator:SI 5 "logical_shift_operator"
12316 + [(match_operand:SI 1 "register_operand" "")
12317 + (match_operand:SI 2 "immediate_operand" "")]))
12318 + (set (match_operand:SI 0 "register_operand" "")
12319 + (logical:SI (match_dup 3)
12320 + (match_operand:SI 4 "register_operand" "")))]
12321 + "(dead_or_set_p(insn, operands[3])) || (REGNO(operands[3]) == REGNO(operands[0]))"
12322 + {
12323 + if ( GET_CODE(operands[5]) == ASHIFT )
12324 + return "<logical_insn>\t%0, %4, %1 << %2";
12325 + else
12326 + return "<logical_insn>\t%0, %4, %1 >> %2";
12327 + }
12328 + [(set_attr "cc" "set_z")]
12329 + )
12330 +
12331 +
12332 +(define_peephole2
12333 + [(set (match_operand:SI 0 "register_operand" "")
12334 + (match_operator:SI 5 "logical_shift_operator"
12335 + [(match_operand:SI 1 "register_operand" "")
12336 + (match_operand:SI 2 "immediate_operand" "")]))
12337 + (set (match_operand:SI 3 "register_operand" "")
12338 + (logical:SI (match_operand:SI 4 "register_operand" "")
12339 + (match_dup 0)))]
12340 + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[3]) == REGNO(operands[0]))"
12341 +
12342 + [(set (match_dup 3)
12343 + (logical:SI (match_op_dup:SI 5 [(match_dup 1) (match_dup 2)])
12344 + (match_dup 4)))]
12345 +
12346 + ""
12347 +)
12348 +
12349 +(define_peephole2
12350 + [(set (match_operand:SI 0 "register_operand" "")
12351 + (match_operator:SI 5 "logical_shift_operator"
12352 + [(match_operand:SI 1 "register_operand" "")
12353 + (match_operand:SI 2 "immediate_operand" "")]))
12354 + (set (match_operand:SI 3 "register_operand" "")
12355 + (logical:SI (match_dup 0)
12356 + (match_operand:SI 4 "register_operand" "")))]
12357 + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[3]) == REGNO(operands[0]))"
12358 +
12359 + [(set (match_dup 3)
12360 + (logical:SI (match_op_dup:SI 5 [(match_dup 1) (match_dup 2)])
12361 + (match_dup 4)))]
12362 +
12363 + ""
12364 +)
12365 +
12366 +
12367 +;;=============================================================================
12368 +;; and
12369 +;;-----------------------------------------------------------------------------
12370 +;; Store the result of a bitwise AND between reg0 and reg2 in reg0.
12371 +;;=============================================================================
12372 +
12373 +(define_insn "andnsi"
12374 + [(set (match_operand:SI 0 "register_operand" "=r")
12375 + (and:SI (match_operand:SI 1 "register_operand" "0")
12376 + (not:SI (match_operand:SI 2 "register_operand" "r"))))]
12377 + ""
12378 + "andn %0, %2"
12379 + [(set_attr "cc" "set_z")
12380 + (set_attr "length" "2")]
12381 +)
12382 +
12383 +
12384 +
12385 +
12386 +(define_insn "andsi3"
12387 + [(set (match_operand:SI 0 "register_operand" "=r, r, r, r")
12388 + (and:SI (match_operand:SI 1 "register_operand" "%0, r, 0, r")
12389 + (match_operand:SI 2 "nonmemory_operand" "r, M, i, r")))]
12390 + ""
12391 + {
12392 + switch (which_alternative){
12393 + case 0:
12394 + return "and\t%0, %2";
12395 + case 1:
12396 + {
12397 + int i, first_set = -1;
12398 + /* Search for first bit set in mask */
12399 + for ( i = 31; i >= 0; --i )
12400 + if ( INTVAL(operands[2]) & (1 << i) ){
12401 + first_set = i;
12402 + break;
12403 + }
12404 + operands[2] = gen_rtx_CONST_INT(SImode, first_set + 1);
12405 + return "bfextu\t%0, %1, 0, %2";
12406 + }
12407 + case 2:
12408 + if ( one_bit_cleared_operand(operands[2], VOIDmode) ){
12409 + int bitpos;
12410 + for ( bitpos = 0; bitpos < 32; bitpos++ )
12411 + if ( !(INTVAL(operands[2]) & (1 << bitpos)) )
12412 + break;
12413 + operands[2] = gen_rtx_CONST_INT(SImode, bitpos);
12414 + return "cbr\t%0, %2";
12415 + } else if ( (INTVAL(operands[2]) >= 0) &&
12416 + (INTVAL(operands[2]) <= 65535) )
12417 + return "andl\t%0, %2, COH";
12418 + else if ( (INTVAL(operands[2]) < 0) &&
12419 + (INTVAL(operands[2]) >= -65536 ) )
12420 + return "andl\t%0, lo(%2)";
12421 + else if ( ((INTVAL(operands[2]) & 0xffff) == 0xffff) )
12422 + return "andh\t%0, hi(%2)";
12423 + else if ( ((INTVAL(operands[2]) & 0xffff) == 0x0) )
12424 + return "andh\t%0, hi(%2), COH";
12425 + else
12426 + return "andh\t%0, hi(%2)\;andl\t%0, lo(%2)";
12427 + case 3:
12428 + return "and\t%0, %1, %2";
12429 + default:
12430 + abort();
12431 + }
12432 + }
12433 +
12434 + [(set_attr "length" "2,4,8,4")
12435 + (set_attr "cc" "set_z")])
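;; Illustrative examples (hypothetical C source) of the constant cases above:
;;
;;   int low_byte  (int x) { return x & 0xff; }        /* low-bit mask -> bfextu      */
;;   int clear_bit (int x) { return x & ~(1 << 5); }   /* one bit cleared -> cbr      */
;;   int mid_bits  (int x) { return x & 0x00ff0000; }  /* low half clear -> andh, COH */
;;
;; with other constants falling back to the andl/andh forms (or both), chosen
;; from the constant's value.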
12436 +
12437 +
12438 +(define_insn "anddi3"
12439 + [(set (match_operand:DI 0 "register_operand" "=&r,&r")
12440 + (and:DI (match_operand:DI 1 "register_operand" "%0,r")
12441 + (match_operand:DI 2 "register_operand" "r,r")))]
12442 + ""
12443 + "#"
12444 + [(set_attr "length" "8")
12445 + (set_attr "cc" "clobber")]
12446 +)
12447 +
12448 +;;=============================================================================
12449 +;; or
12450 +;;-----------------------------------------------------------------------------
12451 +;; Store the result of a bitwise inclusive OR between reg0 and reg2 in reg0.
12452 +;;=============================================================================
12453 +
12454 +(define_insn "iorsi3"
12455 + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
12456 + (ior:SI (match_operand:SI 1 "register_operand" "%0,0,r" )
12457 + (match_operand:SI 2 "nonmemory_operand" "r ,i,r")))]
12458 + ""
12459 + {
12460 + switch (which_alternative){
12461 + case 0:
12462 + return "or\t%0, %2";
12463 + case 1:
12464 + if ( one_bit_set_operand(operands[2], VOIDmode) ){
12465 + int bitpos;
12466 + for (bitpos = 0; bitpos < 32; bitpos++)
12467 + if (INTVAL(operands[2]) & (1 << bitpos))
12468 + break;
12469 + operands[2] = gen_rtx_CONST_INT( SImode, bitpos);
12470 + return "sbr\t%0, %2";
12471 + } else if ( (INTVAL(operands[2]) >= 0) &&
12472 + (INTVAL(operands[2]) <= 65535) )
12473 + return "orl\t%0, %2";
12474 + else if ( ((INTVAL(operands[2]) & 0xffff) == 0x0) )
12475 + return "orh\t%0, hi(%2)";
12476 + else
12477 + return "orh\t%0, hi(%2)\;orl\t%0, lo(%2)";
12478 + case 2:
12479 + return "or\t%0, %1, %2";
12480 + default:
12481 + abort();
12482 + }
12483 + }
12484 + [(set_attr "length" "2,8,4")
12485 + (set_attr "cc" "set_z")])
12486 +
12487 +
12488 +;(define_insn "iorsi3"
12489 +; [(set (match_operand:SI 0 "register_operand" "=r, r, r")
12490 +; (ior:SI (match_operand:SI 1 "avr32_logical_insn_operand" "r, r, rA" )
12491 +; (match_operand:SI 2 "register_operand" "0, i, r")))]
12492 +; ""
12493 +; {
12494 +; switch (which_alternative){
12495 +; case 0:
12496 +; return "or %0, %2";
12497 +; case 1:
12498 +; if ( one_bit_set_operand(operands[2], VOIDmode) ){
12499 +; int i, bitpos;
12500 +; for ( i = 0; i < 32; i++ )
12501 +; if ( INTVAL(operands[2]) & (1 << i) ){
12502 +; bitpos = i;
12503 +; break;
12504 +; }
12505 +; operands[2] = gen_rtx_CONST_INT( SImode, bitpos);
12506 +; return "sbr %0, %2";
12507 +; } else if ( (INTVAL(operands[2]) >= 0) &&
12508 +; (INTVAL(operands[2]) <= 65535) )
12509 +; return "orl %0, %2";
12510 +; else if ( ((INTVAL(operands[2]) & 0xffff) == 0x0) )
12511 +; return "orh %0, hi(%2)";
12512 +; else
12513 +; return "orh %0, hi(%2)\;orl %0, lo(%2)";
12514 +; case 2:
12515 +; return "or %0, %2, %1";
12516 +; }
12517 +; }
12518 +; [(set_attr "length" "2,8,4")
12519 +; (set_attr "cc" "set_z")])
12520 +
12521 +(define_insn "iordi3"
12522 + [(set (match_operand:DI 0 "register_operand" "=&r,&r")
12523 + (ior:DI (match_operand:DI 1 "register_operand" "%0,r")
12524 + (match_operand:DI 2 "register_operand" "r,r")))]
12525 + ""
12526 + "#"
12527 + [(set_attr "length" "8")
12528 + (set_attr "cc" "clobber")]
12529 +)
12530 +
12531 +;;=============================================================================
12532 +;; xor
12533 +;;-----------------------------------------------------------------------------
12534 +;; Store the result of a bitwise exclusive OR between reg0 and reg2 in reg0.
12535 +;;=============================================================================
12536 +
12537 +(define_insn "xorsi3"
12538 + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
12539 + (xor:SI (match_operand:SI 1 "register_operand" "0,0,r")
12540 + (match_operand:SI 2 "nonmemory_operand" "r,i,r")))]
12541 + ""
12542 + {
12543 + switch (which_alternative){
12544 + case 0:
12545 + return "eor %0, %2";
12546 + case 1:
12547 + if ( (INTVAL(operands[2]) >= 0) &&
12548 + (INTVAL(operands[2]) <= 65535) )
12549 + return "eorl %0, %2";
12550 + else if ( ((INTVAL(operands[2]) & 0xffff) == 0x0) )
12551 + return "eorh %0, hi(%2)";
12552 + else
12553 + return "eorh %0, hi(%2)\;eorl %0, lo(%2)";
12554 + case 2:
12555 + return "eor %0, %1, %2";
12556 + default:
12557 + abort();
12558 + }
12559 + }
12560 +
12561 + [(set_attr "length" "2,8,4")
12562 + (set_attr "cc" "set_z")])
12563 +
12564 +(define_insn "xordi3"
12565 + [(set (match_operand:DI 0 "register_operand" "=&r,&r")
12566 + (xor:DI (match_operand:DI 1 "register_operand" "%0,r")
12567 + (match_operand:DI 2 "register_operand" "r,r")))]
12568 + ""
12569 + "#"
12570 + [(set_attr "length" "8")
12571 + (set_attr "cc" "clobber")]
12572 +)
12573 +
12574 +;;=============================================================================
12575 +;; divmod
12576 +;;-----------------------------------------------------------------------------
12577 +;; Signed division that produces both a quotient and a remainder.
12578 +;;=============================================================================
12579 +(define_expand "divmodsi4"
12580 + [(parallel [
12581 + (parallel [
12582 + (set (match_operand:SI 0 "register_operand" "=r")
12583 + (div:SI (match_operand:SI 1 "register_operand" "r")
12584 + (match_operand:SI 2 "register_operand" "r")))
12585 + (set (match_operand:SI 3 "register_operand" "=r")
12586 + (mod:SI (match_dup 1)
12587 + (match_dup 2)))])
12588 + (use (match_dup 4))])]
12589 + ""
12590 + {
12591 + if (! no_new_pseudos) {
12592 + operands[4] = gen_reg_rtx (DImode);
12593 +
12594 + emit_insn(gen_divmodsi4_internal(operands[4],operands[1],operands[2]));
12595 + emit_move_insn(operands[0], gen_rtx_SUBREG( SImode, operands[4], 4));
12596 + emit_move_insn(operands[3], gen_rtx_SUBREG( SImode, operands[4], 0));
12597 +
12598 + DONE;
12599 + } else {
12600 + FAIL;
12601 + }
12602 +
12603 + })
12604 +
12605 +
12606 +(define_insn "divmodsi4_internal"
12607 + [(set (match_operand:DI 0 "register_operand" "=r")
12608 + (unspec:DI [(match_operand:SI 1 "register_operand" "r")
12609 + (match_operand:SI 2 "register_operand" "r")]
12610 + UNSPEC_DIVMODSI4_INTERNAL))]
12611 + ""
12612 + "divs %0, %1, %2"
12613 + [(set_attr "type" "div")
12614 + (set_attr "cc" "none")])
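;; Illustrative example (hypothetical C source): computing quotient and
;; remainder of the same operands,
;;
;;   void qr (int a, int b, int *q, int *r) { *q = a / b; *r = a % b; }
;;
;; lets the divmodsi4 expander fold both operations into one divs, with the
;; quotient and remainder recovered from the two halves of the DImode
;; scratch register.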
12615 +
12616 +
12617 +;;=============================================================================
12618 +;; udivmod
12619 +;;-----------------------------------------------------------------------------
12620 +;; Unsigned division that produces both a quotient and a remainder.
12621 +;;=============================================================================
12622 +(define_expand "udivmodsi4"
12623 + [(parallel [
12624 + (parallel [
12625 + (set (match_operand:SI 0 "register_operand" "=r")
12626 + (udiv:SI (match_operand:SI 1 "register_operand" "r")
12627 + (match_operand:SI 2 "register_operand" "r")))
12628 + (set (match_operand:SI 3 "register_operand" "=r")
12629 + (umod:SI (match_dup 1)
12630 + (match_dup 2)))])
12631 + (use (match_dup 4))])]
12632 + ""
12633 + {
12634 + if (! no_new_pseudos) {
12635 + operands[4] = gen_reg_rtx (DImode);
12636 +
12637 + emit_insn(gen_udivmodsi4_internal(operands[4],operands[1],operands[2]));
12638 + emit_move_insn(operands[0], gen_rtx_SUBREG( SImode, operands[4], 4));
12639 + emit_move_insn(operands[3], gen_rtx_SUBREG( SImode, operands[4], 0));
12640 +
12641 + DONE;
12642 + } else {
12643 + FAIL;
12644 + }
12645 + })
12646 +
12647 +(define_insn "udivmodsi4_internal"
12648 + [(set (match_operand:DI 0 "register_operand" "=r")
12649 + (unspec:DI [(match_operand:SI 1 "register_operand" "r")
12650 + (match_operand:SI 2 "register_operand" "r")]
12651 + UNSPEC_UDIVMODSI4_INTERNAL))]
12652 + ""
12653 + "divu %0, %1, %2"
12654 + [(set_attr "type" "div")
12655 + (set_attr "cc" "none")])
12656 +
12657 +
12658 +;;=============================================================================
12659 +;; Arithmetic-shift left
12660 +;;-----------------------------------------------------------------------------
12661 +;; Arithmetic-shift reg1 left by reg2 or an immediate value and put the result in reg0.
12662 +;;=============================================================================
12663 +
12664 +(define_insn "ashlsi3"
12665 + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
12666 + (ashift:SI (match_operand:SI 1 "register_operand" "r,0,r")
12667 + (match_operand:SI 2 "nonmemory_operand" "r,Ku05,Ku05")))]
12668 + ""
12669 + "@
12670 + lsl %0, %1, %2
12671 + lsl %0, %2
12672 + lsl %0, %1, %2"
12673 + [(set_attr "length" "4,2,4")
12674 + (set_attr "cc" "set_ncz")])
12675 +
12676 +;;=============================================================================
12677 +;; Arithmetic-shift right
12678 +;;-----------------------------------------------------------------------------
12679 +;; Arithmetic-shift reg1 right by reg2 or an immediate value and put the result in reg0.
12680 +;;=============================================================================
12681 +
12682 +(define_insn "ashrsi3"
12683 + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
12684 + (ashiftrt:SI (match_operand:SI 1 "register_operand" "r,0,r")
12685 + (match_operand:SI 2 "nonmemory_operand" "r,Ku05,Ku05")))]
12686 + ""
12687 + "@
12688 + asr %0, %1, %2
12689 + asr %0, %2
12690 + asr %0, %1, %2"
12691 + [(set_attr "length" "4,2,4")
12692 + (set_attr "cc" "set_ncz")])
12693 +
12694 +;;=============================================================================
12695 +;; Logical shift right
12696 +;;-----------------------------------------------------------------------------
12697 +;; Logical-shift reg1 right by reg2 or an immediate value and put the result in reg0.
12698 +;;=============================================================================
12699 +
12700 +(define_insn "lshrsi3"
12701 + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
12702 + (lshiftrt:SI (match_operand:SI 1 "register_operand" "r,0,r")
12703 + (match_operand:SI 2 "nonmemory_operand" "r,Ku05,Ku05")))]
12704 + ""
12705 + "@
12706 + lsr %0, %1, %2
12707 + lsr %0, %2
12708 + lsr %0, %1, %2"
12709 + [(set_attr "length" "4,2,4")
12710 + (set_attr "cc" "set_ncz")])
12711 +
12712 +
12713 +;;=============================================================================
12714 +;; neg
12715 +;;-----------------------------------------------------------------------------
12716 +;; Negate operand 1 and store the result in operand 0.
12717 +;;=============================================================================
12718 +(define_insn "negsi2"
12719 + [(set (match_operand:SI 0 "register_operand" "=r")
12720 + (neg:SI (match_operand:SI 1 "register_operand" "0")))]
12721 + ""
12722 + "neg %0"
12723 + [(set_attr "length" "2")
12724 + (set_attr "cc" "set_vncz")])
12725 +
12726 +;;=============================================================================
12727 +;; abs
12728 +;;-----------------------------------------------------------------------------
12729 +;; Store the absolute value of operand 1 into operand 0.
12730 +;;=============================================================================
12731 +(define_insn "abssi2"
12732 + [(set (match_operand:SI 0 "register_operand" "=r")
12733 + (abs:SI (match_operand:SI 1 "register_operand" "0")))]
12734 + ""
12735 + "abs %0"
12736 + [(set_attr "length" "2")
12737 + (set_attr "cc" "set_z")])
12738 +
12739 +
12740 +;;=============================================================================
12741 +;; one_cmpl
12742 +;;-----------------------------------------------------------------------------
12743 +;; Store the bitwise-complement of operand 1 into operand 0.
12744 +;;=============================================================================
12745 +
12746 +(define_insn "one_cmplsi2"
12747 + [(set (match_operand:SI 0 "register_operand" "=r,r")
12748 + (not:SI (match_operand:SI 1 "register_operand" "r,0")))]
12749 + ""
12750 + "@
12751 + rsub %0, %1, -1
12752 + com %0"
12753 + [(set_attr "length" "4,2")
12754 + (set_attr "cc" "set_z")])
12755 +
12756 +
12757 +;;=============================================================================
12758 +;; Bit load
12759 +;;-----------------------------------------------------------------------------
12760 +;; Load a bit into Z and C flags
12761 +;;=============================================================================
12762 +(define_insn "bldsi"
12763 + [(set (cc0)
12764 + (and:SI (match_operand:SI 0 "register_operand" "r")
12765 + (match_operand:SI 1 "one_bit_set_operand" "i")))]
12766 + ""
12767 + "bld\t%0, %p1"
12768 + [(set_attr "length" "4")
12769 + (set_attr "cc" "bld")]
12770 + )
12771 +
12772 +
12773 +;;=============================================================================
12774 +;; Compare
12775 +;;-----------------------------------------------------------------------------
12776 +;; Compare reg0 with reg1 or an immediate value.
12777 +;;=============================================================================
12778 +
12779 +(define_expand "cmpqi"
12780 + [(set (cc0)
12781 + (compare:QI
12782 + (match_operand:QI 0 "general_operand" "")
12783 + (match_operand:QI 1 "general_operand" "")))]
12784 + ""
12785 + "{
12786 +
12787 + if ( GET_CODE(operands[0]) != REG
12788 + && GET_CODE(operands[0]) != SUBREG)
12789 + operands[0] = force_reg(QImode, operands[0]);
12790 +
12791 +
12792 + if ( GET_CODE(operands[1]) != REG
12793 + && GET_CODE(operands[1]) != SUBREG )
12794 + operands[1] = force_reg(QImode, operands[1]);
12795 +
12796 + avr32_compare_op0 = operands[0];
12797 + avr32_compare_op1 = operands[1];
12798 + emit_insn(gen_cmpqi_internal(operands[0], operands[1]));
12799 + DONE;
12800 + }"
12801 +)
12802 +
12803 +(define_insn "cmpqi_internal"
12804 + [(set (cc0)
12805 + (compare:QI
12806 + (match_operand:QI 0 "register_operand" "r")
12807 + (match_operand:QI 1 "register_operand" "r")))]
12808 + ""
12809 + {
12810 + set_next_insn_cond(insn,
12811 + avr32_output_cmp(get_next_insn_cond(insn), QImode, operands[0], operands[1]));
12812 + return "";
12813 + }
12814 + [(set_attr "length" "4")
12815 + (set_attr "cc" "compare")])
12816 +
12817 +(define_expand "cmphi"
12818 + [(set (cc0)
12819 + (compare:HI
12820 + (match_operand:HI 0 "general_operand" "")
12821 + (match_operand:HI 1 "general_operand" "")))]
12822 + ""
12823 + "{
12824 + if ( GET_CODE(operands[0]) != REG
12825 + && GET_CODE(operands[0]) != SUBREG )
12826 + operands[0] = force_reg(HImode, operands[0]);
12827 +
12828 +
12829 + if ( GET_CODE(operands[1]) != REG
12830 + && GET_CODE(operands[1]) != SUBREG)
12831 + operands[1] = force_reg(HImode, operands[1]);
12832 +
12833 + avr32_compare_op0 = operands[0];
12834 + avr32_compare_op1 = operands[1];
12835 + emit_insn(gen_cmphi_internal(operands[0], operands[1]));
12836 + DONE;
12837 + }"
12838 +)
12839 +
12840 +
12841 +(define_insn "cmphi_internal"
12842 + [(set (cc0)
12843 + (compare:HI
12844 + (match_operand:HI 0 "register_operand" "r")
12845 + (match_operand:HI 1 "register_operand" "r")))]
12846 + ""
12847 + {
12848 + set_next_insn_cond(insn,
12849 + avr32_output_cmp(get_next_insn_cond(insn), HImode, operands[0], operands[1]));
12850 + return "";
12851 + }
12852 + [(set_attr "length" "4")
12853 + (set_attr "cc" "compare")])
12854 +
12855 +
12856 +(define_expand "cmpsi"
12857 + [(set (cc0)
12858 + (compare:SI
12859 + (match_operand:SI 0 "general_operand" "")
12860 + (match_operand:SI 1 "general_operand" "")))]
12861 + ""
12862 + "{
12863 + if ( GET_CODE(operands[0]) != REG
12864 + && GET_CODE(operands[0]) != SUBREG )
12865 + operands[0] = force_reg(SImode, operands[0]);
12866 +
12867 + if ( GET_CODE(operands[1]) != REG
12868 + && GET_CODE(operands[1]) != SUBREG
12869 + && GET_CODE(operands[1]) != CONST_INT )
12870 + operands[1] = force_reg(SImode, operands[1]);
12871 +
12872 + avr32_compare_op0 = operands[0];
12873 + avr32_compare_op1 = operands[1];
12874 +
12875 +
12876 + emit_insn(gen_cmpsi_internal(operands[0], operands[1]));
12877 + DONE;
12878 + }"
12879 +)
12880 +
12881 +
12882 +
12883 +
12884 +(define_insn "cmpsi_internal"
12885 + [(set (cc0)
12886 + (compare:SI
12887 + (match_operand:SI 0 "register_operand" "r, r, r")
12888 + (match_operand:SI 1 "nonmemory_operand" "r, Ks06, Ks21")))]
12889 + ""
12890 + {
12891 + set_next_insn_cond(insn,
12892 + avr32_output_cmp(get_next_insn_cond(insn), SImode, operands[0], operands[1]));
12893 + return "";
12894 + }
12895 +
12896 + [(set_attr "length" "2,2,4")
12897 + (set_attr "cc" "compare")])
12898 +
12899 +
12900 +(define_expand "cmpdi"
12901 + [(set (cc0)
12902 + (compare:DI
12903 + (match_operand:DI 0 "register_operand" "")
12904 + (match_operand:DI 1 "register_operand" "")))]
12905 + ""
12906 + {
12907 + avr32_compare_op0 = operands[0];
12908 + avr32_compare_op1 = operands[1];
12909 + emit_insn(gen_cmpdi_internal(operands[0], operands[1]));
12910 + DONE;
12911 + }
12912 +)
12913 +
12914 +(define_insn "cmpdi_internal"
12915 + [(set (cc0)
12916 + (compare:DI
12917 + (match_operand:DI 0 "register_operand" "r")
12918 + (match_operand:DI 1 "register_operand" "r")))]
12919 + ""
12920 + {
12921 + set_next_insn_cond(insn,
12922 + avr32_output_cmp(get_next_insn_cond(insn), DImode, operands[0], operands[1]));
12923 + return "";
12924 + }
12925 +
12926 + [(set_attr "length" "6")
12927 + (set_attr "type" "alu2")
12928 + (set_attr "cc" "compare")])
12929 +
12930 +
12931 +
12932 +;;=============================================================================
12933 +;; Test if zero
12934 +;;-----------------------------------------------------------------------------
12935 +;; Compare reg against zero and set the condition codes.
12936 +;;=============================================================================
12937 +
12938 +
12939 +(define_expand "tstsi"
12940 + [(set (cc0)
12941 + (match_operand:SI 0 "register_operand" ""))]
12942 + ""
12943 + {
12944 + avr32_compare_op0 = operands[0];
12945 + avr32_compare_op1 = gen_rtx_CONST_INT(SImode, 0);
12946 + emit_insn(gen_tstsi_internal(operands[0]));
12947 + DONE;
12948 + }
12949 +)
12950 +
12951 +(define_insn "tstsi_internal"
12952 + [(set (cc0)
12953 + (match_operand:SI 0 "register_operand" "r"))]
12954 + ""
12955 + {
12956 + set_next_insn_cond(insn,
12957 + avr32_output_cmp(get_next_insn_cond(insn), SImode, operands[0], const0_rtx));
12958 +
12959 + return "";
12960 + }
12961 + [(set_attr "length" "2")
12962 + (set_attr "cc" "compare")])
12963 +
12964 +
12965 +(define_expand "tstdi"
12966 + [(set (cc0)
12967 + (match_operand:DI 0 "register_operand" ""))]
12968 + ""
12969 + {
12970 + avr32_compare_op0 = operands[0];
12971 + avr32_compare_op1 = gen_rtx_CONST_INT(DImode, 0);
12972 + emit_insn(gen_tstdi_internal(operands[0]));
12973 + DONE;
12974 + }
12975 +)
12976 +
12977 +(define_insn "tstdi_internal"
12978 + [(set (cc0)
12979 + (match_operand:DI 0 "register_operand" "r"))]
12980 + ""
12981 + {
12982 + set_next_insn_cond(insn,
12983 + avr32_output_cmp(get_next_insn_cond(insn), DImode, operands[0], const0_rtx));
12984 + return "";
12985 + }
12986 + [(set_attr "length" "4")
12987 + (set_attr "type" "alu2")
12988 + (set_attr "cc" "compare")])
12989 +
12990 +
12991 +
12992 +;;=============================================================================
12993 +;; Convert operands
12994 +;;-----------------------------------------------------------------------------
12995 +;;
12996 +;;=============================================================================
12997 +(define_insn "truncdisi2"
12998 + [(set (match_operand:SI 0 "general_operand" "")
12999 + (truncate:SI (match_operand:DI 1 "general_operand" "")))]
13000 + ""
13001 + "truncdisi2")
13002 +
13003 +;;=============================================================================
13004 +;; Extend
13005 +;;-----------------------------------------------------------------------------
13006 +;;
13007 +;;=============================================================================
13008 +
13009 +
13010 +(define_insn "extendhisi2"
13011 + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
13012 + (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
13013 + ""
13014 + {
13015 + switch ( which_alternative ){
13016 + case 0:
13017 + return "casts.h\t%0";
13018 + case 1:
13019 + return "bfexts\t%0, %1, 0, 16";
13020 + case 2:
13021 + case 3:
13022 + return "ld.sh\t%0, %1";
13023 + default:
13024 + abort();
13025 + }
13026 + }
13027 + [(set_attr "length" "2,4,2,4")
13028 + (set_attr "cc" "set_ncz,set_ncz,none,none")
13029 + (set_attr "type" "alu,alu,load_rm,load_rm")])
13030 +
13031 +(define_insn "extendqisi2"
13032 + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
13033 + (sign_extend:SI (match_operand:QI 1 "extendqi_operand" "0,r,RKu00,m")))]
13034 + ""
13035 + {
13036 + switch ( which_alternative ){
13037 + case 0:
13038 + return "casts.b\t%0";
13039 + case 1:
13040 + return "bfexts\t%0, %1, 0, 8";
13041 + case 2:
13042 + case 3:
13043 + return "ld.sb\t%0, %1";
13044 + default:
13045 + abort();
13046 + }
13047 + }
13048 + [(set_attr "length" "2,4,2,4")
13049 + (set_attr "cc" "set_ncz,set_ncz,none,none")
13050 + (set_attr "type" "alu,alu,load_rm,load_rm")])
13051 +
13052 +(define_insn "extendqihi2"
13053 + [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
13054 + (sign_extend:HI (match_operand:QI 1 "extendqi_operand" "0,r,RKu00,m")))]
13055 + ""
13056 + {
13057 + switch ( which_alternative ){
13058 + case 0:
13059 + return "casts.b\t%0";
13060 + case 1:
13061 + return "bfexts\t%0, %1, 0, 8";
13062 + case 2:
13063 + case 3:
13064 + return "ld.sb\t%0, %1";
13065 + default:
13066 + abort();
13067 + }
13068 + }
13069 + [(set_attr "length" "2,4,2,4")
13070 + (set_attr "cc" "set_ncz,set_ncz,none,none")
13071 + (set_attr "type" "alu,alu,load_rm,load_rm")])
13072 +
13073 +
13074 +;;=============================================================================
13075 +;; Zero-extend
13076 +;;-----------------------------------------------------------------------------
13077 +;;
13078 +;;=============================================================================
13079 +
13080 +(define_insn "zero_extendhisi2"
13081 + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
13082 + (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
13083 + ""
13084 + {
13085 + switch ( which_alternative ){
13086 + case 0:
13087 + return "castu.h\t%0";
13088 + case 1:
13089 + return "bfextu\t%0, %1, 0, 16";
13090 + case 2:
13091 + case 3:
13092 + return "ld.uh\t%0, %1";
13093 + default:
13094 + abort();
13095 + }
13096 + }
13097 +
13098 + [(set_attr "length" "2,4,2,4")
13099 + (set_attr "cc" "set_ncz,set_ncz,none,none")
13100 + (set_attr "type" "alu,alu,load_rm,load_rm")])
13101 +
13102 +(define_insn "zero_extendqisi2"
13103 + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
13104 + (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
13105 + ""
13106 + {
13107 + switch ( which_alternative ){
13108 + case 0:
13109 + return "castu.b\t%0";
13110 + case 1:
13111 + return "bfextu\t%0, %1, 0, 8";
13112 + case 2:
13113 + case 3:
13114 + return "ld.ub\t%0, %1";
13115 + default:
13116 + abort();
13117 + }
13118 + }
13119 + [(set_attr "length" "2,4,2,4")
13120 + (set_attr "cc" "set_ncz, set_ncz, none, none")
13121 + (set_attr "type" "alu, alu, load_rm, load_rm")])
13122 +
13123 +(define_insn "zero_extendqihi2"
13124 + [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
13125 + (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
13126 + ""
13127 + {
13128 + switch ( which_alternative ){
13129 + case 0:
13130 + return "castu.b\t%0";
13131 + case 1:
13132 + return "bfextu\t%0, %1, 0, 8";
13133 + case 2:
13134 + case 3:
13135 + return "ld.ub\t%0, %1";
13136 + default:
13137 + abort();
13138 + }
13139 + }
13140 + [(set_attr "length" "2,4,2,4")
13141 + (set_attr "cc" "set_ncz, set_ncz, none, none")
13142 + (set_attr "type" "alu, alu, load_rm, load_rm")])
13143 +
13144 +
13145 +
13146 +;;=============================================================================
13147 +;; Conditional set register
13148 +;; sr{cond4} rd
13149 +;;-----------------------------------------------------------------------------
13150 +
13151 +;; Because of the same issue as with conditional moves and adds we must
13152 +;; not separate the compare instruction from the scc instruction as
13153 +;; they might be scheduled "badly".
13154 +
13155 +(define_expand "s<code>"
13156 + [(set (match_operand:SI 0 "register_operand" "")
13157 + (any_cond (cc0)
13158 + (const_int 0)))]
13159 + ""
13160 + {
13161 + if ( !avr32_expand_scc(<CODE>, operands) ){
13162 + FAIL;
13163 + }
13164 + DONE;
13165 + }
13166 + )
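+;; As a rough illustration (not part of the original patch), a C statement
+;; such as `flag = (a == b);` is expected to come out of the compare-and-set
+;; patterns below as one fused sequence along the lines of:
+;;   cp.w   r11, r10
+;;   sreq   r12
+;; with the actual registers depending on allocation.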
13167 +
13168 +
13169 +(define_insn "comparesi_and_set"
13170 + [(set (match_operand:SI 0 "register_operand" "=r")
13171 + (match_operator 1 "avr32_comparison_operator"
13172 + [ (compare (match_operand:SI 2 "register_operand" "r")
13173 + (match_operand:SI 3 "general_operand" "rKs06Ks21"))
13174 + (const_int 0)]))]
13175 + ""
13176 + {
13177 + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[2]), operands[2], operands[3]);
13178 + return "sr%1\t%0";
13179 + }
13180 + [(set_attr "length" "6")
13181 + (set_attr "cc" "clobber")])
13182 +
13183 +(define_insn "comparehi_and_set"
13184 + [(set (match_operand:SI 0 "register_operand" "=r")
13185 + (match_operator 1 "avr32_comparison_operator"
13186 + [ (compare (match_operand:HI 2 "register_operand" "r")
13187 + (match_operand:HI 3 "register_operand" "r"))
13188 + (const_int 0)]))]
13189 + ""
13190 + {
13191 + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[2]), operands[2], operands[3]);
13192 + return "sr%1\t%0";
13193 + }
13194 + [(set_attr "length" "6")
13195 + (set_attr "cc" "clobber")])
13196 +
13197 +(define_insn "compareqi_and_set"
13198 + [(set (match_operand:SI 0 "register_operand" "=r")
13199 + (match_operator 1 "avr32_comparison_operator"
13200 + [ (compare (match_operand:QI 2 "register_operand" "r")
13201 + (match_operand:QI 3 "register_operand" "r"))
13202 + (const_int 0)]))]
13203 + ""
13204 + {
13205 + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[2]), operands[2], operands[3]);
13206 + return "sr%1\t%0";
13207 + }
13208 + [(set_attr "length" "6")
13209 + (set_attr "cc" "clobber")])
13210 +
13211 +(define_insn "*comparedi_and_set"
13212 + [(set (match_operand:SI 0 "register_operand" "=r")
13213 + (match_operator 1 "avr32_comparison_operator"
13214 + [ (compare (match_operand:DI 2 "register_operand" "r")
13215 + (match_operand:DI 3 "register_operand" "r"))
13216 + (const_int 0)]))]
13217 + ""
13218 + {
13219 + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[2]), operands[2], operands[3]);
13220 + return "sr%1\t%0";
13221 + }
13222 + [(set_attr "length" "6")
13223 + (set_attr "cc" "clobber")])
13224 +
13225 +(define_insn "*tstdi_and_set"
13226 + [(set (match_operand:SI 0 "register_operand" "=r")
13227 + (match_operator 1 "avr32_comparison_operator"
13228 + [ (compare (match_operand:DI 2 "register_operand" "r")
13229 + (const_int 0))
13230 + (const_int 0)]))]
13231 + ""
13232 + {
13233 + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[2]), operands[2], const0_rtx);
13234 + return "sr%1\t%0";
13235 + }
13236 + [(set_attr "length" "6")
13237 + (set_attr "cc" "clobber")])
13238 +
13239 +
13240 +
13241 +;;=============================================================================
13242 +;; Conditional branch
13243 +;;-----------------------------------------------------------------------------
13244 +;; Branch to label if the specified condition codes are set.
13245 +;;=============================================================================
13246 +; branch if negative
13247 +(define_insn "bmi"
13248 + [(set (pc)
13249 + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_MI)
13250 + (label_ref (match_operand 0 "" ""))
13251 + (pc)))]
13252 + ""
13253 + "brmi %0"
13254 + [(set_attr "type" "branch")
13255 + (set (attr "length")
13256 + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
13257 + (le (minus (pc) (match_dup 0)) (const_int 256)))
13258 + (const_int 2)] ; use compact branch
13259 + (const_int 4))) ; use extended branch
13260 + (set_attr "cc" "none")])
13261 +
13262 +(define_insn "*bmi-reverse"
13263 + [(set (pc)
13264 + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_MI)
13265 + (pc)
13266 + (label_ref (match_operand 0 "" ""))))]
13267 + ""
13268 + "brpl %0"
13269 + [(set_attr "type" "branch")
13270 + (set (attr "length")
13271 + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
13272 + (le (minus (pc) (match_dup 0)) (const_int 256)))
13273 + (const_int 2)] ; use compact branch
13274 + (const_int 4))) ; use extended branch
13275 + (set_attr "cc" "none")])
13276 +
13277 +; branch if positive
13278 +(define_insn "bpl"
13279 + [(set (pc)
13280 + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_PL)
13281 + (label_ref (match_operand 0 "" ""))
13282 + (pc)))]
13283 + ""
13284 + "brpl %0"
13285 + [(set_attr "type" "branch")
13286 + (set (attr "length")
13287 + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
13288 + (le (minus (pc) (match_dup 0)) (const_int 256)))
13289 + (const_int 2)] ; use compact branch
13290 + (const_int 4))) ; use extended branch
13291 + (set_attr "cc" "none")])
13292 +
13293 +(define_insn "*bpl-reverse"
13294 + [(set (pc)
13295 + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_PL)
13296 + (pc)
13297 + (label_ref (match_operand 0 "" ""))))]
13298 + ""
13299 + "brmi %0"
13300 + [(set_attr "type" "branch")
13301 + (set (attr "length")
13302 + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
13303 + (le (minus (pc) (match_dup 0)) (const_int 256)))
13304 + (const_int 2)] ; use compact branch
13305 + (const_int 4))) ; use extended branch
13306 + (set_attr "cc" "none")])
13307 +
13308 +; branch if equal
13309 +(define_insn "b<code>"
13310 + [(set (pc)
13311 + (if_then_else (any_cond:CC (cc0)
13312 + (const_int 0))
13313 + (label_ref (match_operand 0 "" ""))
13314 + (pc)))]
13315 + ""
13316 + "br<cond> %0 "
13317 + [(set_attr "type" "branch")
13318 + (set (attr "length")
13319 + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
13320 + (le (minus (pc) (match_dup 0)) (const_int 256)))
13321 + (const_int 2)] ; use compact branch
13322 + (const_int 4))) ; use extended branch
13323 + (set_attr "cc" "none")])
13324 +
13325 +
13326 +(define_insn "*b<code>-reverse"
13327 + [(set (pc)
13328 + (if_then_else (any_cond:CC (cc0)
13329 + (const_int 0))
13330 + (pc)
13331 + (label_ref (match_operand 0 "" ""))))]
13332 + ""
13333 + "br<invcond> %0 "
13334 + [(set_attr "type" "branch")
13335 + (set (attr "length")
13336 + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
13337 + (le (minus (pc) (match_dup 0)) (const_int 256)))
13338 + (const_int 2)] ; use compact branch
13339 + (const_int 4))) ; use extended branch
13340 + (set_attr "cc" "none")])
13341 +
13342 +
13343 +
13344 +;=============================================================================
13345 +; Conditional Add/Subtract
13346 +;-----------------------------------------------------------------------------
13347 +; sub{cond4} Rd, imm
13348 +;=============================================================================
13349 +
13350 +
13351 +(define_expand "add<mode>cc"
13352 + [(set (match_operand:ADDCC 0 "register_operand" "")
13353 + (if_then_else:ADDCC (match_operand 1 "avr32_comparison_operator" "")
13354 + (match_operand:ADDCC 2 "register_immediate_operand" "")
13355 + (match_operand:ADDCC 3 "register_immediate_operand" "")))]
13356 + ""
13357 + {
13358 + if ( avr32_expand_addcc(<MODE>mode, operands ) )
13359 + DONE;
13360 + else
13361 + FAIL;
13362 + }
13363 + )
13364 +
13365 +
13366 +(define_insn "add<ADDCC:mode>cc_cmp<CMP:mode>"
13367 + [(set (match_operand:ADDCC 0 "register_operand" "=&r")
13368 + (unspec:ADDCC [(match_operand 1 "avr32_comparison_operator" "")
13369 + (match_operand:ADDCC 2 "register_operand" "0")
13370 + (match_operand:ADDCC 3 "immediate_operand" "Ks08")
13371 + (match_operand:CMP 4 "register_operand" "r")
13372 + (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")
13373 + ]
13374 + UNSPEC_ADDSICC ))]
13375 + ""
13376 + {
13377 + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
13378 +
13379 + return "sub%1\t%0, %3";
13380 + }
13381 + [(set_attr "length" "8")
13382 + (set_attr "cc" "clobber")])
13383 +
13384 +
13385 +;=============================================================================
13386 +; Conditional Move
13387 +;-----------------------------------------------------------------------------
13388 +; mov{cond4} Rd, (Rs/imm)
13389 +;=============================================================================
13390 +(define_expand "mov<mode>cc"
13391 + [(set (match_operand:ADDCC 0 "register_operand" "")
13392 + (if_then_else:ADDCC (match_operand 1 "avr32_comparison_operator" "")
13393 + (match_operand:ADDCC 2 "register_immediate_operand" "")
13394 + (match_operand:ADDCC 3 "register_immediate_operand" "")))]
13395 + ""
13396 + {
13397 + if ( avr32_expand_movcc(<MODE>mode, operands ) )
13398 + DONE;
13399 + else
13400 + FAIL;
13401 + }
13402 + )
13403 +
13404 +(define_insn "mov<MOVCC:mode>cc_cmp<CMP:mode>"
13405 + [(set (match_operand:MOVCC 0 "register_operand" "=r,r,r")
13406 + (unspec:MOVCC [(match_operand 1 "avr32_comparison_operator" "")
13407 + (match_operand:MOVCC 2 "register_immediate_operand" "0,rKs08,rKs08")
13408 + (match_operand:MOVCC 3 "register_immediate_operand" "rKs08,0,rKs08")
13409 + (match_operand:CMP 4 "register_operand" "r, r, r")
13410 + (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>, <CMP:cmp_constraint>, <CMP:cmp_constraint>")
13411 + ]
13412 + UNSPEC_MOVSICC ))]
13413 + ""
13414 + {
13415 + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
13416 +
13417 + switch( which_alternative ){
13418 + case 0:
13419 + return "mov%i1 %0, %3";
13420 + case 1:
13421 + return "mov%1 %0, %2";
13422 + case 2:
13423 + return "mov%1 %0, %2\;mov%i1 %0, %3";
13424 + default:
13425 + abort();
13426 + }
13427 +
13428 +
13429 + }
13430 + [(set_attr "length" "8,8,12")
13431 + (set_attr "cc" "clobber")])
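+;; As a sketch (registers and condition purely illustrative), a ternary such
+;; as `x = (a < b) ? p : q;` should come out of the pattern above roughly as:
+;;   cp.w   r10, r9      ; compare a, b
+;;   movlt  r12, r8      ; x = p when a < b
+;;   movge  r12, r7      ; x = q otherwise
+;; i.e. the compare and the predicated moves are kept together as one unit.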
13432 +
13433 +
13434 +;;=============================================================================
13435 +;; jump
13436 +;;-----------------------------------------------------------------------------
13437 +;; Jump inside a function; an unconditional branch to a label.
13438 +;;=============================================================================
13439 +(define_insn "jump"
13440 + [(set (pc)
13441 + (label_ref (match_operand 0 "" "")))]
13442 + ""
13443 + {
13444 + if (get_attr_length(insn) > 4)
13445 + return "Can't jump this far";
13446 + return (get_attr_length(insn) == 2 ?
13447 + "rjmp %0" : "bral %0");
13448 + }
13449 + [(set_attr "type" "branch")
13450 + (set (attr "length")
13451 + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 1022))
13452 + (le (minus (pc) (match_dup 0)) (const_int 1024)))
13453 + (const_int 2) ; use rjmp
13454 + (le (match_dup 0) (const_int 1048575))
13455 + (const_int 4)] ; use bral
13456 + (const_int 8))) ; do something else
13457 + (set_attr "cc" "none")])
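+;; For instance (illustrative only), a jump whose target lies within roughly
+;; +/-1KB is emitted as the 2-byte `rjmp %0`, a farther one as the 4-byte
+;; `bral %0`; targets beyond that fall into the 8-byte case, for which the
+;; template above currently only emits the "Can't jump this far" diagnostic.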
13458 +
13459 +;;=============================================================================
13460 +;; call
13461 +;;-----------------------------------------------------------------------------
13462 +;; Subroutine call instruction returning no value.
13463 +;;=============================================================================
13464 +(define_insn "call_internal"
13465 + [(parallel [(call (mem:SI (match_operand:SI 0 "avr32_call_operand" "r,U,T,W"))
13466 + (match_operand 1 "" ""))
13467 + (clobber (reg:SI LR_REGNUM))])]
13468 + ""
13469 + {
13470 + switch (which_alternative){
13471 + case 0:
13472 + return "icall\t%0";
13473 + case 1:
13474 + return "rcall\t%0";
13475 + case 2:
13476 + return "mcall\t%0";
13477 + case 3:
13478 + if ( TARGET_HAS_ASM_ADDR_PSEUDOS )
13479 + return "call\t%0";
13480 + else
13481 + return "mcall\tr6[%0@got]";
13482 + default:
13483 + abort();
13484 + }
13485 + }
13486 + [(set_attr "type" "call")
13487 + (set_attr "length" "2,4,4,10")
13488 + (set_attr "cc" "clobber")])
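+;; Rough guide (illustrative, not from the original sources): depending on
+;; which constraint the call address satisfies, the pattern above emits
+;; `icall` for an address held in a register, `rcall` or `mcall` for the
+;; direct forms, and for a bare symbol either the assembler's `call` pseudo
+;; or an `mcall` through the GOT slot addressed via r6.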
13489 +
13490 +
13491 +(define_expand "call"
13492 + [(parallel [(call (match_operand:SI 0 "" "")
13493 + (match_operand 1 "" ""))
13494 + (clobber (reg:SI LR_REGNUM))])]
13495 + ""
13496 + {
13497 + rtx call_address;
13498 + if ( GET_CODE(operands[0]) != MEM )
13499 + FAIL;
13500 +
13501 + call_address = XEXP(operands[0], 0);
13502 +
13503 + /* If assembler supports call pseudo insn and the call
13504 + address is a symbol then nothing special needs to be done. */
13505 + if ( TARGET_HAS_ASM_ADDR_PSEUDOS
13506 + && (GET_CODE(call_address) == SYMBOL_REF) ){
13507 + /* We must however mark the function as using the GOT if
13508 + flag_pic is set, since the call insn might turn into
13509 + a mcall using the GOT ptr register. */
13510 + if ( flag_pic ){
13511 + current_function_uses_pic_offset_table = 1;
13512 + emit_call_insn(gen_call_internal(call_address, operands[1]));
13513 + DONE;
13514 + }
13515 + } else {
13516 + if ( flag_pic &&
13517 + GET_CODE(call_address) == SYMBOL_REF ){
13518 + current_function_uses_pic_offset_table = 1;
13519 + emit_call_insn(gen_call_internal(call_address, operands[1]));
13520 + DONE;
13521 + }
13522 +
13523 + if ( !SYMBOL_REF_RCALL_FUNCTION_P(operands[0]) ){
13524 + if ( optimize_size &&
13525 + GET_CODE(call_address) == SYMBOL_REF ){
13526 + call_address = force_const_mem(SImode, call_address);
13527 + } else {
13528 + call_address = force_reg(SImode, call_address);
13529 + }
13530 + }
13531 + }
13532 + emit_call_insn(gen_call_internal(call_address, operands[1]));
13533 + DONE;
13534 + }
13535 +)
13536 +
13537 +;;=============================================================================
13538 +;; call_value
13539 +;;-----------------------------------------------------------------------------
13540 +;; Subroutine call instruction returning a value.
13541 +;;=============================================================================
13542 +(define_expand "call_value"
13543 + [(parallel [(set (match_operand:SI 0 "" "")
13544 + (call (match_operand:SI 1 "" "")
13545 + (match_operand 2 "" "")))
13546 + (clobber (reg:SI LR_REGNUM))])]
13547 + ""
13548 + {
13549 + rtx call_address;
13550 + if ( GET_CODE(operands[1]) != MEM )
13551 + FAIL;
13552 +
13553 + call_address = XEXP(operands[1], 0);
13554 +
13555 + /* If assembler supports call pseudo insn and the call
13556 + address is a symbol then nothing special needs to be done. */
13557 + if ( TARGET_HAS_ASM_ADDR_PSEUDOS
13558 + && (GET_CODE(call_address) == SYMBOL_REF) ){
13559 + /* We must however mark the function as using the GOT if
13560 + flag_pic is set, since the call insn might turn into
13561 + a mcall using the GOT ptr register. */
13562 + if ( flag_pic ) {
13563 + current_function_uses_pic_offset_table = 1;
13564 + emit_call_insn(gen_call_value_internal(operands[0], call_address, operands[2]));
13565 + DONE;
13566 + }
13567 + } else {
13568 + if ( flag_pic &&
13569 + GET_CODE(call_address) == SYMBOL_REF ){
13570 + current_function_uses_pic_offset_table = 1;
13571 + emit_call_insn(gen_call_value_internal(operands[0], call_address, operands[2]));
13572 + DONE;
13573 + }
13574 +
13575 + if ( !SYMBOL_REF_RCALL_FUNCTION_P(operands[1]) ){
13576 + if ( optimize_size &&
13577 + GET_CODE(call_address) == SYMBOL_REF){
13578 + call_address = force_const_mem(SImode, call_address);
13579 + } else {
13580 + call_address = force_reg(SImode, call_address);
13581 + }
13582 + }
13583 + }
13584 + emit_call_insn(gen_call_value_internal(operands[0], call_address,
13585 + operands[2]));
13586 + DONE;
13587 +
13588 + })
13589 +
13590 +(define_insn "call_value_internal"
13591 + [(parallel [(set (match_operand 0 "register_operand" "=r,r,r,r")
13592 + (call (mem:SI (match_operand:SI 1 "avr32_call_operand" "r,U,T,W"))
13593 + (match_operand 2 "" "")))
13594 + (clobber (reg:SI LR_REGNUM))])]
13595 + ;; Operand 2 not used on the AVR32.
13596 + ""
13597 + {
13598 + switch (which_alternative){
13599 + case 0:
13600 + return "icall\t%1";
13601 + case 1:
13602 + return "rcall\t%1";
13603 + case 2:
13604 + return "mcall\t%1";
13605 + case 3:
13606 + if ( TARGET_HAS_ASM_ADDR_PSEUDOS )
13607 + return "call\t%1";
13608 + else
13609 + return "mcall\tr6[%1@got]";
13610 + default:
13611 + abort();
13612 + }
13613 + }
13614 + [(set_attr "type" "call")
13615 + (set_attr "length" "2,4,4,10")
13616 + (set_attr "cc" "call_set")])
13617 +
13618 +
13619 +;;=============================================================================
13620 +;; untyped_call
13621 +;;-----------------------------------------------------------------------------
13622 +;; Subroutine call instruction returning a value of any type.
13623 +;; The code is copied from m68k.md (except gen_blockage is removed)
13624 +;; Fixme!
13625 +;;=============================================================================
13626 +(define_expand "untyped_call"
13627 + [(parallel [(call (match_operand 0 "avr32_call_operand" "")
13628 + (const_int 0))
13629 + (match_operand 1 "" "")
13630 + (match_operand 2 "" "")])]
13631 + ""
13632 + {
13633 + int i;
13634 +
13635 + emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
13636 +
13637 + for (i = 0; i < XVECLEN (operands[2], 0); i++) {
13638 + rtx set = XVECEXP (operands[2], 0, i);
13639 + emit_move_insn (SET_DEST (set), SET_SRC (set));
13640 + }
13641 +
13642 + /* The optimizer does not know that the call sets the function value
13643 + registers we stored in the result block. We avoid problems by
13644 + claiming that all hard registers are used and clobbered at this
13645 + point. */
13646 + emit_insn (gen_blockage ());
13647 +
13648 + DONE;
13649 + })
13650 +
13651 +
13652 +;;=============================================================================
13653 +;; return
13654 +;;=============================================================================
13655 +
13656 +(define_insn "return"
13657 + [(return)]
13658 + "USE_RETURN_INSN (FALSE)"
13659 + {
13660 + avr32_output_return_instruction(TRUE, FALSE, NULL, NULL);
13661 + return "";
13662 + }
13663 + [(set_attr "length" "4")
13664 + (set_attr "type" "call")]
13665 + )
13666 +
13667 +(define_insn "*return_value_imm"
13668 + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
13669 + (use (reg RETVAL_REGNUM))
13670 + (return)])]
13671 + "USE_RETURN_INSN (FALSE) &&
13672 + ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
13673 + {
13674 + avr32_output_return_instruction(TRUE, FALSE, NULL, operands[0]);
13675 + return "";
13676 + }
13677 + [(set_attr "length" "4")
13678 + (set_attr "type" "call")]
13679 + )
13680 +
13681 +(define_insn "*return_value_si"
13682 + [(set (reg RETVAL_REGNUM) (match_operand:SI 0 "register_operand" "r"))
13683 + (use (reg RETVAL_REGNUM))
13684 + (return)]
13685 + "USE_RETURN_INSN (TRUE)"
13686 + "retal %0";
13687 + [(set_attr "type" "call")]
13688 + )
13689 +
13690 +(define_insn "*return_value_hi"
13691 + [(parallel [(set (reg RETVAL_REGNUM) (match_operand:HI 0 "register_operand" "r"))
13692 + (use (reg RETVAL_REGNUM))
13693 + (return)])]
13694 + "USE_RETURN_INSN (TRUE)"
13695 + "retal %0"
13696 + [(set_attr "type" "call")]
13697 + )
13698 +
13699 +(define_insn "*return_value_qi"
13700 + [(parallel [(set (reg RETVAL_REGNUM) (match_operand:QI 0 "register_operand" "r"))
13701 + (use (reg RETVAL_REGNUM))
13702 + (return)])]
13703 + "USE_RETURN_INSN (TRUE)"
13704 + "retal %0"
13705 + [(set_attr "type" "call")]
13706 + )
13707 +
13708 +;;=============================================================================
13709 +;; nop
13710 +;;-----------------------------------------------------------------------------
13711 +;; No-op instruction.
13712 +;;=============================================================================
13713 +(define_insn "nop"
13714 + [(const_int 0)]
13715 + ""
13716 + "nop"
13717 + [(set_attr "length" "2")
13718 + (set_attr "type" "alu")
13719 + (set_attr "cc" "none")])
13720 +
13721 +;;=============================================================================
13722 +;; nonlocal_goto
13723 +;;-----------------------------------------------------------------------------
13724 +;; Jump from one function to a label in an outer function.
13725 +;; Must invalidate return stack, since the function will be exited without
13726 +;; a return
13727 +;;=============================================================================
13728 +(define_expand "nonlocal_goto"
13729 + [(use (match_operand 0 "" ""))
13730 + (use (match_operand 1 "" ""))
13731 + (use (match_operand 2 "" ""))
13732 + (use (match_operand 3 "" ""))]
13733 + ""
13734 + {
13735 + emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__nonlocal_goto"),
13736 + 0, VOIDmode, 3,
13737 + operands[0], SImode,
13738 + operands[1], Pmode,
13739 + operands[2], SImode);
13740 +
13741 + DONE;
13742 + }
13743 +)
13744 +
13745 +
13746 +(define_expand "builtin_longjmp"
13747 + [(use (match_operand 0 "" ""))]
13748 + ""
13749 + {
13750 + rtx ops[3];
13751 +
13752 + ops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS(SImode, operands[0], gen_rtx_CONST_INT(SImode,0)));
13753 + ops[1] = gen_rtx_MEM (Pmode, gen_rtx_PLUS(SImode, operands[0], gen_rtx_CONST_INT(SImode,4)));
13754 + ops[2] = gen_rtx_MEM (Pmode, gen_rtx_PLUS(SImode, operands[0], gen_rtx_CONST_INT(SImode,8)));
13755 +
13756 +
13757 + emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__nonlocal_goto"),
13758 + 0, VOIDmode, 3,
13759 + ops[0], SImode,
13760 + ops[1], Pmode,
13761 + ops[2], SImode);
13762 +
13763 + DONE;
13764 + }
13765 + )
13766 +
13767 +
13768 +;;=============================================================================
13769 +;; indirect_jump
13770 +;;-----------------------------------------------------------------------------
13771 +;; Jump to an address in reg or memory.
13772 +;;=============================================================================
13773 +(define_expand "indirect_jump"
13774 + [(set (pc)
13775 + (match_operand:SI 0 "general_operand" "r,m"))]
13776 + ""
13777 + {
13778 + /* One of the ops has to be in a register. */
13779 + if ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS )
13780 + && !avr32_legitimate_pic_operand_p(operands[0]) )
13781 + operands[0] = legitimize_pic_address (operands[0], SImode, 0);
13782 + else if ( flag_pic && avr32_address_operand(operands[0], GET_MODE(operands[0])) )
13783 + /* If we have an address operand then this function uses the pic register. */
13784 + current_function_uses_pic_offset_table = 1;
13785 + })
13786 +
13787 +
13788 +(define_insn "indirect_jump_internal"
13789 + [(set (pc)
13790 + (match_operand:SI 0 "general_operand" "r,m,W"))]
13791 + ""
13792 + {
13793 + switch( which_alternative ){
13794 + case 0:
13795 + return "mov\tpc, %0";
13796 + case 1:
13797 + if ( avr32_const_pool_ref_operand(operands[0], GET_MODE(operands[0])) )
13798 + return "lddpc\tpc, %0";
13799 + else
13800 + return "ld.w\tpc, %0";
13801 + case 2:
13802 + if ( flag_pic )
13803 + return "ld.w\tpc, r6[%0@got]";
13804 + else
13805 + return "lda.w\tpc, %0";
13806 + default:
13807 + abort();
13808 + }
13809 + }
13810 + [(set_attr "length" "2,4,8")
13811 + (set_attr "type" "call,call,call")
13812 + (set_attr "cc" "none,none,clobber")])
13813 +
13814 +
13815 +;;=============================================================================
13816 +;; casesi
13817 +;;=============================================================================
13818 +
13819 +
13820 +(define_expand "casesi"
13821 + [(match_operand:SI 0 "register_operand" "") ; index to jump on
13822 + (match_operand:SI 1 "const_int_operand" "") ; lower bound
13823 + (match_operand:SI 2 "const_int_operand" "") ; total range
13824 + (match_operand:SI 3 "" "") ; table label
13825 + (match_operand:SI 4 "" "")] ; Out of range label
13826 + ""
13827 + "
13828 + {
13829 + rtx reg;
13830 + if (operands[1] != const0_rtx)
13831 + {
13832 + if (!avr32_const_ok_for_constraint_p(INTVAL (operands[1]), 'I', \"Is21\")){
13833 + reg = force_reg(SImode, GEN_INT (INTVAL (operands[1])));
13834 + emit_insn (gen_subsi3 (reg, operands[0],
13835 + reg));
13836 + } else {
13837 + reg = gen_reg_rtx (SImode);
13838 + emit_insn (gen_addsi3 (reg, operands[0],
13839 + GEN_INT (-INTVAL (operands[1]))));
13840 + }
13841 + operands[0] = reg;
13842 + }
13843 +
13844 + if (!avr32_const_ok_for_constraint_p(INTVAL (operands[2]), 'K', \"Ks21\"))
13845 + operands[2] = force_reg (SImode, operands[2]);
13846 +
13847 + emit_jump_insn (gen_casesi_internal (operands[0], operands[2], operands[3],
13848 + operands[4], gen_reg_rtx(SImode)));
13849 + DONE;
13850 + }"
13851 +)
13852 +
13853 +;; The USE in this pattern is needed to tell flow analysis that this is
13854 +;; a CASESI insn. It has no other purpose.
13855 +(define_insn "casesi_internal"
13856 + [(parallel [(set (pc)
13857 + (if_then_else
13858 + (leu (match_operand:SI 0 "register_operand" "r")
13859 + (match_operand:SI 1 "register_immediate_operand" "rKu03"))
13860 + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
13861 + (label_ref (match_operand 2 "" ""))))
13862 + (label_ref (match_operand 3 "" ""))))
13863 + (clobber (match_operand:SI 4 "register_operand" "=r"))
13864 + (use (label_ref (match_dup 2)))])]
13865 + ""
13866 + {
13867 + if (flag_pic)
13868 + return "cp.w\t%0, %1\;brhi\t%3\;sub\t%4, pc, -(%2 - .)\;add\tpc, %4, %0 << 2";
13869 + return "cp.w\t%0, %1\;brhi\t%3\;sub\t%4, pc, -(%2 - .)\;ld.w\tpc, %4[%0 << 2]";
13870 + }
13871 + [(set_attr "cc" "clobber")
13872 + (set_attr "length" "16")]
13873 +)
13874 +
13875 +
13876 +(define_insn "prefetch"
13877 + [(prefetch (match_operand:SI 0 "register_operand" "r")
13878 + (match_operand 1 "const_int_operand" "")
13879 + (match_operand 2 "const_int_operand" ""))]
13880 + ""
13881 + {
13882 + return "pref\t%0[0]";
13883 + }
13884 +
13885 + [(set_attr "length" "4")
13886 + (set_attr "type" "load")
13887 + (set_attr "cc" "none")])
13888 +
13889 +
13890 +
13891 +;;=============================================================================
13892 +;; prologue
13893 +;;-----------------------------------------------------------------------------
13894 +;; This pattern, if defined, emits RTL for entry to a function. The function
13895 +;; entry is responsible for setting up the stack frame, initializing the frame
13896 +;; pointer register, saving callee saved registers, etc.
13897 +;;=============================================================================
13898 +(define_expand "prologue"
13899 + [(clobber (const_int 0))]
13900 + ""
13901 + "
13902 + avr32_expand_prologue();
13903 + DONE;
13904 + "
13905 + )
13906 +
13907 +;;=============================================================================
13908 +;; eh_return
13909 +;;-----------------------------------------------------------------------------
13910 +;; This pattern, if defined, affects the way __builtin_eh_return, and
13911 +;; thence the call frame exception handling library routines, are
13912 +;; built. It is intended to handle non-trivial actions needed along
13913 +;; the abnormal return path.
13914 +;;
13915 +;; The address of the exception handler to which the function should
13916 +;; return is passed as an operand to this pattern. It will normally need
13917 +;; to be copied by the pattern to some special register or memory
13918 +;; location. If the pattern needs to determine the location of the
13919 +;; target call frame in order to do so, it may use
13920 +;; EH_RETURN_STACKADJ_RTX, if defined; it will have already been
13921 +;; assigned.
13922 +;;
13923 +;; If this pattern is not defined, the default action will be to
13924 +;; simply copy the return address to EH_RETURN_HANDLER_RTX. Either
13925 +;; that macro or this pattern needs to be defined if call frame
13926 +;; exception handling is to be used.
13927 +(define_expand "eh_return"
13928 + [(use (match_operand 0 "general_operand" ""))]
13929 + ""
13930 + "
13931 + avr32_set_return_address (operands[0]);
13932 + DONE;
13933 + "
13934 + )
13935 +
13936 +;;=============================================================================
13937 +;; ffssi2
13938 +;;-----------------------------------------------------------------------------
13939 +(define_insn "ffssi2"
13940 + [ (set (match_operand:SI 0 "register_operand" "=r")
13941 + (ffs:SI (match_operand:SI 1 "register_operand" "r"))) ]
13942 + ""
13943 + "mov %0, %1
13944 + brev %0
13945 + clz %0, %0
13946 + sub %0, -1
13947 + cp %0, 33
13948 + moveq %0, 0"
13949 + [(set_attr "length" "18")
13950 + (set_attr "cc" "clobber")]
13951 + )
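+;; Worked example (illustrative): for an input of 4 (bit 2 set), brev moves
+;; that bit to position 29, clz then returns 2, and `sub %0, -1` yields
+;; ffs(4) = 3.  For an input of 0, clz returns 32, the increment gives 33,
+;; and the trailing `cp`/`moveq` pair folds the result back to 0, matching
+;; the usual ffs() convention.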
13952 +
13953 +
13954 +
13955 +;;=============================================================================
13956 +;; swap_h
13957 +;;-----------------------------------------------------------------------------
13958 +(define_insn "*swap_h"
13959 + [ (set (match_operand:SI 0 "register_operand" "=r")
13960 + (ior:SI (ashift:SI (match_dup 0) (const_int 16))
13961 + (lshiftrt:SI (match_dup 0) (const_int 16))))]
13962 + ""
13963 + "swap.h %0"
13964 + [(set_attr "length" "2")]
13965 + )
13966 +
13967 +(define_insn_and_split "bswap_16"
13968 + [ (set (match_operand:HI 0 "avr32_bswap_operand" "=r,RKs13,r")
13969 + (ior:HI (and:HI (lshiftrt:HI (match_operand:HI 1 "avr32_bswap_operand" "r,r,RKs13")
13970 + (const_int 8))
13971 + (const_int 255))
13972 + (ashift:HI (and:HI (match_dup 1)
13973 + (const_int 255))
13974 + (const_int 8))))]
13975 + ""
13976 + {
13977 + switch ( which_alternative ){
13978 + case 0:
13979 + if ( REGNO(operands[0]) == REGNO(operands[1]))
13980 + return "swap.bh\t%0";
13981 + else
13982 + return "mov\t%0, %1\;swap.bh\t%0";
13983 + case 1:
13984 + return "stswp.h\t%0, %1";
13985 + case 2:
13986 + return "ldswp.sh\t%0, %1";
13987 + default:
13988 + abort();
13989 + }
13990 + }
13991 +
13992 + "(reload_completed &&
13993 + REG_P(operands[0]) && REG_P(operands[1])
13994 + && (REGNO(operands[0]) != REGNO(operands[1])))"
13995 + [(set (match_dup 0) (match_dup 1))
13996 + (set (match_dup 0)
13997 + (ior:HI (and:HI (lshiftrt:HI (match_dup 0)
13998 + (const_int 8))
13999 + (const_int 255))
14000 + (ashift:HI (and:HI (match_dup 0)
14001 + (const_int 255))
14002 + (const_int 8))))]
14003 + ""
14004 +
14005 + [(set_attr "length" "4,4,4")
14006 + (set_attr "type" "alu,store,load_rm")]
14007 + )
14008 +
14009 +(define_insn_and_split "bswap_32"
14010 + [ (set (match_operand:SI 0 "avr32_bswap_operand" "=r,RKs14,r")
14011 + (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_operand:SI 1 "avr32_bswap_operand" "=r,r,RKs14")
14012 + (const_int 4278190080))
14013 + (const_int 24))
14014 + (lshiftrt:SI (and:SI (match_dup 1)
14015 + (const_int 16711680))
14016 + (const_int 8)))
14017 + (ior:SI (ashift:SI (and:SI (match_dup 1)
14018 + (const_int 65280))
14019 + (const_int 8))
14020 + (ashift:SI (and:SI (match_dup 1)
14021 + (const_int 255))
14022 + (const_int 24)))))]
14023 + ""
14024 + {
14025 + switch ( which_alternative ){
14026 + case 0:
14027 + if ( REGNO(operands[0]) == REGNO(operands[1]))
14028 + return "swap.b\t%0";
14029 + else
14030 + return "mov\t%0, %1\;swap.b\t%0";
14031 + case 1:
14032 + return "stswp.w\t%0, %1";
14033 + case 2:
14034 + return "ldswp.w\t%0, %1";
14035 + default:
14036 + abort();
14037 + }
14038 + }
14039 + "(reload_completed &&
14040 + REG_P(operands[0]) && REG_P(operands[1])
14041 + && (REGNO(operands[0]) != REGNO(operands[1])))"
14042 + [(set (match_dup 0) (match_dup 1))
14043 + (set (match_dup 0)
14044 + (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_dup 0)
14045 + (const_int 4278190080))
14046 + (const_int 24))
14047 + (lshiftrt:SI (and:SI (match_dup 0)
14048 + (const_int 16711680))
14049 + (const_int 8)))
14050 + (ior:SI (ashift:SI (and:SI (match_dup 0)
14051 + (const_int 65280))
14052 + (const_int 8))
14053 + (ashift:SI (and:SI (match_dup 0)
14054 + (const_int 255))
14055 + (const_int 24)))))]
14056 + ""
14057 +
14058 + [(set_attr "length" "4,4,4")
14059 + (set_attr "type" "alu,store,load_rm")]
14060 + )
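+;; Illustrative example: in the register alternative a value such as
+;; 0x12345678 is byte-reversed to 0x78563412 by `swap.b`, while the memory
+;; alternatives fold the swap into the access itself via ldswp.w/stswp.w.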
14061 +
14062 +
14063 +;;=============================================================================
14064 +;; blockage
14065 +;;-----------------------------------------------------------------------------
14066 +;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
14067 +;; all of memory. This blocks insns from being moved across this point.
14068 +
14069 +(define_insn "blockage"
14070 + [(unspec_volatile [(const_int 0)] VUNSPEC_BLOCKAGE)]
14071 + ""
14072 + ""
14073 + [(set_attr "length" "0")]
14074 +)
14075 +
14076 +;;=============================================================================
14077 +;; clzsi2
14078 +;;-----------------------------------------------------------------------------
14079 +(define_insn "clzsi2"
14080 + [ (set (match_operand:SI 0 "register_operand" "=r")
14081 + (clz:SI (match_operand:SI 1 "register_operand" "r"))) ]
14082 + ""
14083 + "clz %0, %1"
14084 + [(set_attr "length" "4")
14085 + (set_attr "cc" "set_z")]
14086 + )
14087 +
14088 +;;=============================================================================
14089 +;; ctzsi2
14090 +;;-----------------------------------------------------------------------------
14091 +(define_insn "ctzsi2"
14092 + [ (set (match_operand:SI 0 "register_operand" "=r,r")
14093 + (ctz:SI (match_operand:SI 1 "register_operand" "0,r"))) ]
14094 + ""
14095 + "@
14096 + brev\t%0\;clz\t%0, %0
14097 + mov\t%0, %1\;brev\t%0\;clz\t%0, %0"
14098 + [(set_attr "length" "8")
14099 + (set_attr "cc" "set_z")]
14100 + )
14101 +
14102 +;;=============================================================================
14103 +;; cache instructions
14104 +;;-----------------------------------------------------------------------------
14105 +(define_insn "cache"
14106 + [ (unspec_volatile [(match_operand:SI 0 "register_operand" "r")
14107 + (match_operand:SI 1 "immediate_operand" "Ku05")] VUNSPEC_CACHE)]
14108 + ""
14109 + "cache %0[0], %1"
14110 + [(set_attr "length" "4")]
14111 + )
14112 +
14113 +(define_insn "sync"
14114 + [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku08")] VUNSPEC_SYNC)]
14115 + ""
14116 + "sync %0"
14117 + [(set_attr "length" "4")]
14118 + )
14119 +
14120 +;;=============================================================================
14121 +;; TLB instructions
14122 +;;-----------------------------------------------------------------------------
14123 +(define_insn "tlbr"
14124 + [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBR)]
14125 + ""
14126 + "tlbr"
14127 + [(set_attr "length" "2")]
14128 + )
14129 +
14130 +(define_insn "tlbw"
14131 + [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBW)]
14132 + ""
14133 + "tlbw"
14134 + [(set_attr "length" "2")]
14135 + )
14136 +
14137 +(define_insn "tlbs"
14138 + [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBS)]
14139 + ""
14140 + "tlbs"
14141 + [(set_attr "length" "2")]
14142 + )
14143 +
14144 +;;=============================================================================
14145 +;; Breakpoint instruction
14146 +;;-----------------------------------------------------------------------------
14147 +(define_insn "breakpoint"
14148 + [ (unspec_volatile [(const_int 0)] VUNSPEC_BREAKPOINT)]
14149 + ""
14150 + "breakpoint"
14151 + [(set_attr "length" "2")]
14152 + )
14153 +
14154 +;;=============================================================================
14155 +;; Xchg instruction
14156 +;;-----------------------------------------------------------------------------
14157 +(define_insn "xchg"
14158 + [ (parallel [(set (match_operand:SI 0 "register_operand" "=&r")
14159 + (mem:SI (match_operand:SI 1 "register_operand" "r")))
14160 + (set (mem:SI (match_operand:SI 2 "register_operand" "=1"))
14161 + (match_operand:SI 3 "register_operand" "r"))])]
14162 + ""
14163 + "xchg\t%0, %1, %3"
14164 + [(set_attr "length" "4")]
14165 + )
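+;; Illustrative use (assumption, not from the original sources): `xchg r0,
+;; r1, r2` swaps the word at [r1] with the value in r2 and leaves the old
+;; memory word in r0, the typical building block for a simple lock acquire.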
14166 +
14167 +;;=============================================================================
14168 +;; mtsr/mfsr instruction
14169 +;;-----------------------------------------------------------------------------
14170 +(define_insn "mtsr"
14171 + [ (unspec_volatile [(match_operand 0 "immediate_operand" "i")
14172 + (match_operand:SI 1 "register_operand" "r")] VUNSPEC_MTSR)]
14173 + ""
14174 + "mtsr\t%0, %1"
14175 + [(set_attr "length" "4")]
14176 + )
14177 +
14178 +(define_insn "mfsr"
14179 + [ (set (match_operand:SI 0 "register_operand" "=r")
14180 + (unspec_volatile:SI [(match_operand 1 "immediate_operand" "i")] VUNSPEC_MFSR)) ]
14181 + ""
14182 + "mfsr\t%0, %1"
14183 + [(set_attr "length" "4")]
14184 + )
14185 +
14186 +;;=============================================================================
14187 +;; mtdr/mfdr instruction
14188 +;;-----------------------------------------------------------------------------
14189 +(define_insn "mtdr"
14190 + [ (unspec_volatile [(match_operand 0 "immediate_operand" "i")
14191 + (match_operand:SI 1 "register_operand" "r")] VUNSPEC_MTDR)]
14192 + ""
14193 + "mtdr\t%0, %1"
14194 + [(set_attr "length" "4")]
14195 + )
14196 +
14197 +(define_insn "mfdr"
14198 + [ (set (match_operand:SI 0 "register_operand" "=r")
14199 + (unspec_volatile:SI [(match_operand 1 "immediate_operand" "i")] VUNSPEC_MFDR)) ]
14200 + ""
14201 + "mfdr\t%0, %1"
14202 + [(set_attr "length" "4")]
14203 + )
14204 +
14205 +;;=============================================================================
14206 +;; musfr
14207 +;;-----------------------------------------------------------------------------
14208 +(define_insn "musfr"
14209 + [ (unspec_volatile [(match_operand:SI 0 "register_operand" "r")] VUNSPEC_MUSFR)]
14210 + ""
14211 + "musfr\t%0"
14212 + [(set_attr "length" "2")
14213 + (set_attr "cc" "clobber")]
14214 + )
14215 +
14216 +(define_insn "mustr"
14217 + [ (set (match_operand:SI 0 "register_operand" "=r")
14218 + (unspec_volatile:SI [(const_int 0)] VUNSPEC_MUSTR)) ]
14219 + ""
14220 + "mustr\t%0"
14221 + [(set_attr "length" "2")]
14222 + )
14223 +
14224 +;;=============================================================================
14225 +;; Saturation Round Scale instruction
14226 +;;-----------------------------------------------------------------------------
14227 +(define_insn "sats"
14228 + [ (set (match_operand:SI 0 "register_operand" "+r")
14229 + (unspec:SI [(match_dup 0)
14230 + (match_operand 1 "immediate_operand" "Ku05")
14231 + (match_operand 2 "immediate_operand" "Ku05")]
14232 + UNSPEC_SATS)) ]
14233 + "TARGET_DSP"
14234 + "sats\t%0 >> %1, %2"
14235 + [(set_attr "type" "alu_sat")
14236 + (set_attr "length" "4")]
14237 + )
14238 +
14239 +(define_insn "satu"
14240 + [ (set (match_operand:SI 0 "register_operand" "+r")
14241 + (unspec:SI [(match_dup 0)
14242 + (match_operand 1 "immediate_operand" "Ku05")
14243 + (match_operand 2 "immediate_operand" "Ku05")]
14244 + UNSPEC_SATU)) ]
14245 + "TARGET_DSP"
14246 + "satu\t%0 >> %1, %2"
14247 + [(set_attr "type" "alu_sat")
14248 + (set_attr "length" "4")]
14249 + )
14250 +
14251 +(define_insn "satrnds"
14252 + [ (set (match_operand:SI 0 "register_operand" "+r")
14253 + (unspec:SI [(match_dup 0)
14254 + (match_operand 1 "immediate_operand" "Ku05")
14255 + (match_operand 2 "immediate_operand" "Ku05")]
14256 + UNSPEC_SATRNDS)) ]
14257 + "TARGET_DSP"
14258 + "satrnds\t%0 >> %1, %2"
14259 + [(set_attr "type" "alu_sat")
14260 + (set_attr "length" "4")]
14261 + )
14262 +
14263 +(define_insn "satrndu"
14264 + [ (set (match_operand:SI 0 "register_operand" "+r")
14265 + (unspec:SI [(match_dup 0)
14266 + (match_operand 1 "immediate_operand" "Ku05")
14267 + (match_operand 2 "immediate_operand" "Ku05")]
14268 + UNSPEC_SATRNDU)) ]
14269 + "TARGET_DSP"
14270 + "sats\t%0 >> %1, %2"
14271 + [(set_attr "type" "alu_sat")
14272 + (set_attr "length" "4")]
14273 + )
14274 +
14275 +;; Special patterns for dealing with the constant pool
14276 +
14277 +(define_insn "align_4"
14278 + [(unspec_volatile [(const_int 0)] VUNSPEC_ALIGN)]
14279 + ""
14280 + {
14281 + assemble_align (32);
14282 + return "";
14283 + }
14284 + [(set_attr "length" "2")]
14285 +)
14286 +
14287 +(define_insn "consttable_start"
14288 + [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_START)]
14289 + ""
14290 + {
14291 + return ".cpool";
14292 + }
14293 + [(set_attr "length" "0")]
14294 + )
14295 +
14296 +(define_insn "consttable_end"
14297 + [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_END)]
14298 + ""
14299 + {
14300 + making_const_table = FALSE;
14301 + return "";
14302 + }
14303 + [(set_attr "length" "0")]
14304 +)
14305 +
14306 +
14307 +(define_insn "consttable_4"
14308 + [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_4)]
14309 + ""
14310 + {
14311 + making_const_table = TRUE;
14312 + switch (GET_MODE_CLASS (GET_MODE (operands[0])))
14313 + {
14314 + case MODE_FLOAT:
14315 + {
14316 + REAL_VALUE_TYPE r;
14317 + char real_string[1024];
14318 + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]);
14319 + real_to_decimal(real_string, &r, 1024, 0, 1);
14320 + asm_fprintf (asm_out_file, "\t.float\t%s\n", real_string);
14321 + break;
14322 + }
14323 + default:
14324 + assemble_integer (operands[0], 4, 0, 1);
14325 + break;
14326 + }
14327 + return "";
14328 + }
14329 + [(set_attr "length" "4")]
14330 +)
14331 +
14332 +(define_insn "consttable_8"
14333 + [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_8)]
14334 + ""
14335 + {
14336 + making_const_table = TRUE;
14337 + switch (GET_MODE_CLASS (GET_MODE (operands[0])))
14338 + {
14339 + case MODE_FLOAT:
14340 + {
14341 + REAL_VALUE_TYPE r;
14342 + char real_string[1024];
14343 + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]);
14344 + real_to_decimal(real_string, &r, 1024, 0, 1);
14345 + asm_fprintf (asm_out_file, "\t.double\t%s\n", real_string);
14346 + break;
14347 + }
14348 + default:
14349 + assemble_integer(operands[0], 8, 0, 1);
14350 + break;
14351 + }
14352 + return "";
14353 + }
14354 + [(set_attr "length" "8")]
14355 +)
14356 +
14357 +;;=============================================================================
14358 +;; coprocessor instructions
14359 +;;-----------------------------------------------------------------------------
14360 +(define_insn "cop"
14361 + [ (unspec_volatile [(match_operand 0 "immediate_operand" "Ku03")
14362 + (match_operand 1 "immediate_operand" "Ku04")
14363 + (match_operand 2 "immediate_operand" "Ku04")
14364 + (match_operand 3 "immediate_operand" "Ku04")
14365 + (match_operand 4 "immediate_operand" "Ku07")] VUNSPEC_COP)]
14366 + ""
14367 + "cop\tcp%0, cr%1, cr%2, cr%3, %4"
14368 + [(set_attr "length" "4")]
14369 + )
14370 +
14371 +(define_insn "mvcrsi"
14372 + [ (set (match_operand:SI 0 "avr32_cop_move_operand" "=r,<,Z")
14373 + (unspec_volatile:SI [(match_operand 1 "immediate_operand" "Ku03,Ku03,Ku03")
14374 + (match_operand 2 "immediate_operand" "Ku04,Ku04,Ku04")]
14375 + VUNSPEC_MVCR)) ]
14376 + ""
14377 + "@
14378 + mvcr.w\tcp%1, %0, cr%2
14379 + stcm.w\tcp%1, %0, cr%2
14380 + stc.w\tcp%1, %0, cr%2"
14381 + [(set_attr "length" "4")]
14382 + )
14383 +
14384 +(define_insn "mvcrdi"
14385 + [ (set (match_operand:DI 0 "avr32_cop_move_operand" "=r,<,Z")
14386 + (unspec_volatile:DI [(match_operand 1 "immediate_operand" "Ku03,Ku03,Ku03")
14387 + (match_operand 2 "immediate_operand" "Ku04,Ku04,Ku04")]
14388 + VUNSPEC_MVCR)) ]
14389 + ""
14390 + "@
14391 + mvcr.d\tcp%1, %0, cr%2
14392 + stcm.d\tcp%1, %0, cr%2-cr%i2
14393 + stc.d\tcp%1, %0, cr%2"
14394 + [(set_attr "length" "4")]
14395 + )
14396 +
14397 +(define_insn "mvrcsi"
14398 + [ (unspec_volatile:SI [(match_operand 0 "immediate_operand" "Ku03,Ku03,Ku03")
14399 + (match_operand 1 "immediate_operand" "Ku04,Ku04,Ku04")
14400 + (match_operand:SI 2 "avr32_cop_move_operand" "r,>,Z")]
14401 + VUNSPEC_MVRC)]
14402 + ""
14403 + {
14404 + switch (which_alternative){
14405 + case 0:
14406 + return "mvrc.w\tcp%0, cr%1, %2";
14407 + case 1:
14408 + return "ldcm.w\tcp%0, %2, cr%1";
14409 + case 2:
14410 + return "ldc.w\tcp%0, cr%1, %2";
14411 + default:
14412 + abort();
14413 + }
14414 + }
14415 + [(set_attr "length" "4")]
14416 + )
14417 +
14418 +(define_insn "mvrcdi"
14419 + [ (unspec_volatile:DI [(match_operand 0 "immediate_operand" "Ku03,Ku03,Ku03")
14420 + (match_operand 1 "immediate_operand" "Ku04,Ku04,Ku04")
14421 + (match_operand:DI 2 "avr32_cop_move_operand" "r,>,Z")]
14422 + VUNSPEC_MVRC)]
14423 + ""
14424 + {
14425 + switch (which_alternative){
14426 + case 0:
14427 + return "mvrc.d\tcp%0, cr%1, %2";
14428 + case 1:
14429 + return "ldcm.d\tcp%0, %2, cr%1-cr%i1";
14430 + case 2:
14431 + return "ldc.d\tcp%0, cr%1, %2";
14432 + default:
14433 + abort();
14434 + }
14435 + }
14436 + [(set_attr "length" "4")]
14437 + )
14438 +
14439 +;;=============================================================================
14440 +;; epilogue
14441 +;;-----------------------------------------------------------------------------
14442 +;; This pattern emits RTL for exit from a function. The function exit is
14443 +;; responsible for deallocating the stack frame, restoring callee saved
14444 +;; registers and emitting the return instruction.
14445 +;; ToDo: consider using TARGET_ASM_FUNCTION_PROLOGUE instead.
14446 +;;=============================================================================
14447 +(define_expand "epilogue"
14448 + [(unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
14449 + ""
14450 + "
14451 + if (USE_RETURN_INSN (FALSE)){
14452 + emit_jump_insn (gen_return ());
14453 + DONE;
14454 + }
14455 + emit_jump_insn (gen_rtx_UNSPEC_VOLATILE (VOIDmode,
14456 + gen_rtvec (1,
14457 + gen_rtx_RETURN (VOIDmode)),
14458 + VUNSPEC_EPILOGUE));
14459 + DONE;
14460 + "
14461 + )
14462 +
14463 +(define_insn "*epilogue_insns"
14464 + [(unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
14465 + ""
14466 + {
14467 + avr32_output_return_instruction (FALSE, FALSE, NULL, NULL);
14468 + return "";
14469 + }
14470 + ; Length is absolute worst case
14471 + [(set_attr "type" "branch")
14472 + (set_attr "length" "12")]
14473 + )
14474 +
14475 +(define_insn "*epilogue_insns_ret_imm"
14476 + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
14477 + (use (reg RETVAL_REGNUM))
14478 + (unspec_volatile [(return)] VUNSPEC_EPILOGUE)])]
14479 + "((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
14480 + {
14481 + avr32_output_return_instruction (FALSE, FALSE, NULL, operands[0]);
14482 + return "";
14483 + }
14484 + ; Length is absolute worst case
14485 + [(set_attr "type" "branch")
14486 + (set_attr "length" "12")]
14487 + )
14488 +
14489 +(define_insn "sibcall_epilogue"
14490 + [(unspec_volatile [(const_int 0)] VUNSPEC_EPILOGUE)]
14491 + ""
14492 + {
14493 + avr32_output_return_instruction (FALSE, FALSE, NULL, NULL);
14494 + return "";
14495 + }
14496 +;; Length is absolute worst case
14497 + [(set_attr "type" "branch")
14498 + (set_attr "length" "12")]
14499 + )
14500 +
14501 +(define_insn "*sibcall_epilogue_insns_ret_imm"
14502 + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
14503 + (use (reg RETVAL_REGNUM))
14504 + (unspec_volatile [(const_int 0)] VUNSPEC_EPILOGUE)])]
14505 + "((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
14506 + {
14507 + avr32_output_return_instruction (FALSE, FALSE, NULL, operands[0]);
14508 + return "";
14509 + }
14510 + ; Length is absolute worst case
14511 + [(set_attr "type" "branch")
14512 + (set_attr "length" "12")]
14513 + )
14514 +
14515 +(define_insn "ldxi"
14516 + [(set (match_operand:SI 0 "register_operand" "=r")
14517 + (mem:SI (plus:SI
14518 + (match_operand:SI 1 "register_operand" "r")
14519 + (mult:SI (zero_extract:SI (match_operand:SI 2 "register_operand" "r")
14520 + (const_int 8)
14521 + (match_operand:SI 3 "immediate_operand" "Ku05"))
14522 + (const_int 4)))))]
14523 + "(INTVAL(operands[3]) == 24 || INTVAL(operands[3]) == 16 || INTVAL(operands[3]) == 8
14524 + || INTVAL(operands[3]) == 0)"
14525 + {
14526 + switch ( INTVAL(operands[3]) ){
14527 + case 0:
14528 + return "ld.w %0, %1[%2:b << 2]";
14529 + case 8:
14530 + return "ld.w %0, %1[%2:l << 2]";
14531 + case 16:
14532 + return "ld.w %0, %1[%2:u << 2]";
14533 + case 24:
14534 + return "ld.w %0, %1[%2:t << 2]";
14535 + default:
14536 + internal_error("illegal operand for ldxi");
14537 + }
14538 + }
14539 + [(set_attr "type" "load")
14540 + (set_attr "length" "4")
14541 + (set_attr "cc" "none")])
14542 +
14543 +
14544 +
14545 +
14546 +
14547 +
14548 +;;=============================================================================
14549 +;; Peephole optimizing
14550 +;;-----------------------------------------------------------------------------
14551 +;; Changing
14552 +;; sub r8, r7, 8
14553 +;; st.w r8[0x0], r12
14554 +;; to
14555 +;; sub r8, r7, 8
14556 +;; st.w r7[-0x8], r12
14557 +;;=============================================================================
14558 +; (set (reg:SI 9 r8)
14559 +; (plus:SI (reg/f:SI 6 r7)
14560 +; (const_int ...)))
14561 +; (set (mem:SI (reg:SI 9 r8))
14562 +; (reg:SI 12 r12))
14563 +(define_peephole2
14564 + [(set (match_operand:SI 0 "register_operand" "")
14565 + (plus:SI (match_operand:SI 1 "register_operand" "")
14566 + (match_operand:SI 2 "immediate_operand" "")))
14567 + (set (mem:SI (match_dup 0))
14568 + (match_operand:SI 3 "register_operand" ""))]
14569 + "REGNO(operands[0]) != REGNO(operands[1]) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'K', \"Ks16\")"
14570 + [(set (match_dup 0)
14571 + (plus:SI (match_dup 1)
14572 + (match_dup 2)))
14573 + (set (mem:SI (plus:SI (match_dup 1)
14574 + (match_dup 2)))
14575 + (match_dup 3))]
14576 + "")
14577 +
14578 +;;=============================================================================
14579 +;; Peephole optimizing
14580 +;;-----------------------------------------------------------------------------
14581 +;; Changing
14582 +;; sub r6, r7, 4
14583 +;; ld.w r6, r6[0x0]
14584 +;; to
14585 +;; sub r6, r7, 4
14586 +;; ld.w r6, r7[-0x4]
14587 +;;=============================================================================
14588 +; (set (reg:SI 7 r6)
14589 +; (plus:SI (reg/f:SI 6 r7)
14590 +; (const_int -4 [0xfffffffc])))
14591 +; (set (reg:SI 7 r6)
14592 +; (mem:SI (reg:SI 7 r6)))
14593 +(define_peephole2
14594 + [(set (match_operand:SI 0 "register_operand" "")
14595 + (plus:SI (match_operand:SI 1 "register_operand" "")
14596 + (match_operand:SI 2 "immediate_operand" "")))
14597 + (set (match_operand:SI 3 "register_operand" "")
14598 + (mem:SI (match_dup 0)))]
14599 + "REGNO(operands[0]) != REGNO(operands[1]) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'K', \"Ks16\")"
14600 + [(set (match_dup 0)
14601 + (plus:SI (match_dup 1)
14602 + (match_dup 2)))
14603 + (set (match_dup 3)
14604 + (mem:SI (plus:SI (match_dup 1)
14605 + (match_dup 2))))]
14606 + "")
14607 +
14608 +;;=============================================================================
14609 +;; Peephole optimizing
14610 +;;-----------------------------------------------------------------------------
14611 +;; Changing
14612 +;; ld.sb r0, r7[-0x6]
14613 +;; casts.b r0
14614 +;; to
14615 +;; ld.sb r0, r7[-0x6]
14616 +;;=============================================================================
14617 +(define_peephole2
14618 + [(set (match_operand:QI 0 "register_operand" "")
14619 + (match_operand:QI 1 "load_sb_memory_operand" ""))
14620 + (set (match_operand:SI 2 "register_operand" "")
14621 + (sign_extend:SI (match_dup 0)))]
14622 + "(REGNO(operands[0]) == REGNO(operands[2]) || peep2_reg_dead_p(2, operands[0]))"
14623 + [(set (match_dup 2)
14624 + (sign_extend:SI (match_dup 1)))]
14625 + "")
14626 +
14627 +;;=============================================================================
14628 +;; Peephole optimizing
14629 +;;-----------------------------------------------------------------------------
14630 +;; Changing
14631 +;; ld.ub r0, r7[-0x6]
14632 +;; castu.b r0
14633 +;; to
14634 +;; ld.ub r0, r7[-0x6]
14635 +;;=============================================================================
14636 +(define_peephole2
14637 + [(set (match_operand:QI 0 "register_operand" "")
14638 + (match_operand:QI 1 "memory_operand" ""))
14639 + (set (match_operand:SI 2 "register_operand" "")
14640 + (zero_extend:SI (match_dup 0)))]
14641 + "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
14642 + [(set (match_dup 2)
14643 + (zero_extend:SI (match_dup 1)))]
14644 + "")
14645 +
14646 +;;=============================================================================
14647 +;; Peephole optimizing
14648 +;;-----------------------------------------------------------------------------
14649 +;; Changing
14650 +;; ld.sh r0, r7[-0x6]
14651 +;; casts.h r0
14652 +;; to
14653 +;; ld.sh r0, r7[-0x6]
14654 +;;=============================================================================
14655 +(define_peephole2
14656 + [(set (match_operand:HI 0 "register_operand" "")
14657 + (match_operand:HI 1 "memory_operand" ""))
14658 + (set (match_operand:SI 2 "register_operand" "")
14659 + (sign_extend:SI (match_dup 0)))]
14660 + "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
14661 + [(set (match_dup 2)
14662 + (sign_extend:SI (match_dup 1)))]
14663 + "")
14664 +
14665 +;;=============================================================================
14666 +;; Peephole optimizing
14667 +;;-----------------------------------------------------------------------------
14668 +;; Changing
14669 +;; ld.uh r0, r7[-0x6]
14670 +;; castu.h r0
14671 +;; to
14672 +;; ld.uh r0, r7[-0x6]
14673 +;;=============================================================================
14674 +(define_peephole2
14675 + [(set (match_operand:HI 0 "register_operand" "")
14676 + (match_operand:HI 1 "memory_operand" ""))
14677 + (set (match_operand:SI 2 "register_operand" "")
14678 + (zero_extend:SI (match_dup 0)))]
14679 + "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
14680 + [(set (match_dup 2)
14681 + (zero_extend:SI (match_dup 1)))]
14682 + "")
14683 +
14684 +;;=============================================================================
14685 +;; Peephole optimizing
14686 +;;-----------------------------------------------------------------------------
14687 +;; Changing
14688 +;; mul rd, rx, ry
14689 +;; add rd2, rd
14690 +;; to
14691 +;; mac rd2, rx, ry
14692 +;;=============================================================================
14693 +(define_peephole2
14694 + [(set (match_operand:SI 0 "register_operand" "")
14695 + (mult:SI (match_operand:SI 1 "register_operand" "")
14696 + (match_operand:SI 2 "register_operand" "")))
14697 + (set (match_operand:SI 3 "register_operand" "")
14698 + (plus:SI (match_dup 3)
14699 + (match_dup 0)))]
14700 + "peep2_reg_dead_p(2, operands[0])"
14701 + [(set (match_dup 3)
14702 + (plus:SI (mult:SI (match_dup 1)
14703 + (match_dup 2))
14704 + (match_dup 3)))]
14705 + "")
14706 +
14707 +
14708 +
14709 +;;=============================================================================
14710 +;; Peephole optimizing
14711 +;;-----------------------------------------------------------------------------
14712 +;; Changing
14713 +;; bfextu rd, rs, k5, 1 or and(h/l) rd, one_bit_set_mask
14714 +;; to
14715 +;; bld rs, k5
14716 +;;
14717 +;; If rd is dead after the operation.
14718 +;;=============================================================================
14719 +(define_peephole2
14720 + [ (set (match_operand:SI 0 "register_operand" "")
14721 + (zero_extract:SI (match_operand:SI 1 "register_operand" "")
14722 + (const_int 1)
14723 + (match_operand:SI 2 "immediate_operand" "")))
14724 + (set (cc0)
14725 + (match_dup 0))]
14726 + "peep2_reg_dead_p(2, operands[0])"
14727 + [(set (cc0)
14728 + (and:SI (match_dup 1)
14729 + (match_dup 2)))]
14730 + "operands[2] = GEN_INT(1 << INTVAL(operands[2]));")
14731 +
14732 +(define_peephole2
14733 + [ (set (match_operand:SI 0 "register_operand" "")
14734 + (and:SI (match_operand:SI 1 "register_operand" "")
14735 + (match_operand:SI 2 "one_bit_set_operand" "")))
14736 + (set (cc0)
14737 + (match_dup 0))]
14738 + "peep2_reg_dead_p(2, operands[0])"
14739 + [(set (cc0)
14740 + (and:SI (match_dup 1)
14741 + (match_dup 2)))]
14742 + "")
14743 +
14744 +;;=============================================================================
14745 +;; Peephole optimizing
14746 +;;-----------------------------------------------------------------------------
14747 +;; Load with extracted index: ld.w Rd, Rb[Ri:{t/u/b/l} << 2]
14748 +;;
14749 +;;=============================================================================
14750 +
14751 +
14752 +(define_peephole
14753 + [(set (match_operand:SI 0 "register_operand" "")
14754 + (zero_extract:SI (match_operand:SI 1 "register_operand" "")
14755 + (const_int 8)
14756 + (match_operand:SI 2 "avr32_extract_shift_operand" "")))
14757 + (set (match_operand:SI 3 "register_operand" "")
14758 + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
14759 + (match_operand:SI 4 "register_operand" ""))))]
14760 +
14761 + "(dead_or_set_p(insn, operands[0]))"
14762 + {
14763 + switch ( INTVAL(operands[2]) ){
14764 + case 0:
14765 + return "ld.w %3, %4[%1:b << 2]";
14766 + case 8:
14767 + return "ld.w %3, %4[%1:l << 2]";
14768 + case 16:
14769 + return "ld.w %3, %4[%1:u << 2]";
14770 + case 24:
14771 + return "ld.w %3, %4[%1:t << 2]";
14772 + default:
14773 + internal_error("illegal operand for ldxi");
14774 + }
14775 + }
14776 + [(set_attr "type" "load")
14777 + (set_attr "length" "4")
14778 + (set_attr "cc" "clobber")]
14779 + )
14780 +
14781 +
14782 +
14783 +(define_peephole
14784 + [(set (match_operand:SI 0 "register_operand" "")
14785 + (and:SI (match_operand:SI 1 "register_operand" "") (const_int 255)))
14786 + (set (match_operand:SI 2 "register_operand" "")
14787 + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
14788 + (match_operand:SI 3 "register_operand" ""))))]
14789 +
14790 + "(dead_or_set_p(insn, operands[0]))"
14791 +
14792 + "ld.w %2, %3[%1:b << 2]"
14793 + [(set_attr "type" "load")
14794 + (set_attr "length" "4")
14795 + (set_attr "cc" "clobber")]
14796 + )
14797 +
14798 +
14799 +(define_peephole2
14800 + [(set (match_operand:SI 0 "register_operand" "")
14801 + (zero_extract:SI (match_operand:SI 1 "register_operand" "")
14802 + (const_int 8)
14803 + (match_operand:SI 2 "avr32_extract_shift_operand" "")))
14804 + (set (match_operand:SI 3 "register_operand" "")
14805 + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
14806 + (match_operand:SI 4 "register_operand" ""))))]
14807 +
14808 + "(peep2_reg_dead_p(2, operands[0]))
14809 + || (REGNO(operands[0]) == REGNO(operands[3]))"
14810 + [(set (match_dup 3)
14811 + (mem:SI (plus:SI
14812 + (match_dup 4)
14813 + (mult:SI (zero_extract:SI (match_dup 1)
14814 + (const_int 8)
14815 + (match_dup 2))
14816 + (const_int 4)))))]
14817 + )
14818 +
14819 +(define_peephole2
14820 + [(set (match_operand:SI 0 "register_operand" "")
14821 + (zero_extend:SI (match_operand:QI 1 "register_operand" "")))
14822 + (set (match_operand:SI 2 "register_operand" "")
14823 + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
14824 + (match_operand:SI 3 "register_operand" ""))))]
14825 +
14826 + "(peep2_reg_dead_p(2, operands[0]))
14827 + || (REGNO(operands[0]) == REGNO(operands[2]))"
14828 + [(set (match_dup 2)
14829 + (mem:SI (plus:SI
14830 + (match_dup 3)
14831 + (mult:SI (zero_extract:SI (match_dup 1)
14832 + (const_int 8)
14833 + (const_int 0))
14834 + (const_int 4)))))]
14835 + "operands[1] = gen_rtx_REG(SImode, REGNO(operands[1]));"
14836 + )
14837 +
14838 +
14839 +(define_peephole2
14840 + [(set (match_operand:SI 0 "register_operand" "")
14841 + (and:SI (match_operand:SI 1 "register_operand" "")
14842 + (const_int 255)))
14843 + (set (match_operand:SI 2 "register_operand" "")
14844 + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
14845 + (match_operand:SI 3 "register_operand" ""))))]
14846 +
14847 + "(peep2_reg_dead_p(2, operands[0]))
14848 + || (REGNO(operands[0]) == REGNO(operands[2]))"
14849 + [(set (match_dup 2)
14850 + (mem:SI (plus:SI
14851 + (match_dup 3)
14852 + (mult:SI (zero_extract:SI (match_dup 1)
14853 + (const_int 8)
14854 + (const_int 0))
14855 + (const_int 4)))))]
14856 + ""
14857 + )
14858 +
14859 +
14860 +
14861 +(define_peephole2
14862 + [(set (match_operand:SI 0 "register_operand" "")
14863 + (lshiftrt:SI (match_operand:SI 1 "register_operand" "")
14864 + (const_int 24)))
14865 + (set (match_operand:SI 2 "register_operand" "")
14866 + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
14867 + (match_operand:SI 3 "register_operand" ""))))]
14868 +
14869 + "(peep2_reg_dead_p(2, operands[0]))
14870 + || (REGNO(operands[0]) == REGNO(operands[2]))"
14871 + [(set (match_dup 2)
14872 + (mem:SI (plus:SI
14873 + (match_dup 3)
14874 + (mult:SI (zero_extract:SI (match_dup 1)
14875 + (const_int 8)
14876 + (const_int 24))
14877 + (const_int 4)))))]
14878 + ""
14879 + )
14880 +
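+;; A minimal C-level sketch (hypothetical names, for illustration) of a
+;; table lookup that benefits from the extracted-index loads above:
+;;   extern int table[256];
+;;   int lookup_top_byte(unsigned x) { return table[(x >> 24) & 0xff]; }
+;; The byte extract feeding the scaled index can be folded into a single
+;; ld.w with an extracted-index address.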
14881 +
14882 +;;************************************************
14883 +;; ANDN
14884 +;;
14885 +;;************************************************
14886 +
14887 +
14888 +(define_peephole2
14889 + [(set (match_operand:SI 0 "register_operand" "")
14890 + (not:SI (match_operand:SI 1 "register_operand" "")))
14891 + (set (match_operand:SI 2 "register_operand" "")
14892 + (and:SI (match_dup 2)
14893 + (match_dup 0)))]
14894 + "peep2_reg_dead_p(2, operands[0])"
14895 +
14896 + [(set (match_dup 2)
14897 + (and:SI (match_dup 2)
14898 + (not:SI (match_dup 1))
14899 + ))]
14900 + ""
14901 +)
14902 +
14903 +(define_peephole2
14904 + [(set (match_operand:SI 0 "register_operand" "")
14905 + (not:SI (match_operand:SI 1 "register_operand" "")))
14906 + (set (match_operand:SI 2 "register_operand" "")
14907 + (and:SI (match_dup 0)
14908 + (match_dup 2)
14909 + ))]
14910 + "peep2_reg_dead_p(2, operands[0])"
14911 +
14912 + [(set (match_dup 2)
14913 + (and:SI (match_dup 2)
14914 + (not:SI (match_dup 1))
14915 + ))]
14916 +
14917 + ""
14918 +)
14919 +
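+;; A minimal C-level sketch (hypothetical name, for illustration) of the
+;; and-not idiom these peepholes target:
+;;   unsigned clear_bits(unsigned x, unsigned mask) { return x & ~mask; }
+;; When the inverted mask register dies, the separate not and and can be
+;; combined into a single andn.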
14920 +
14921 +;;=================================================================
14922 +;; Addabs peephole
14923 +;;=================================================================
14924 +
14925 +(define_peephole
14926 + [(set (match_operand:SI 2 "register_operand" "=r")
14927 + (abs:SI (match_operand:SI 1 "register_operand" "r")))
14928 + (set (match_operand:SI 0 "register_operand" "=r")
14929 + (plus:SI (match_operand:SI 3 "register_operand" "r")
14930 + (match_dup 2)))]
14931 + "dead_or_set_p(insn, operands[2])"
14932 + "addabs %0, %3, %1"
14933 + [(set_attr "length" "4")
14934 + (set_attr "cc" "set_z")])
14935 +
14936 +(define_peephole
14937 + [(set (match_operand:SI 2 "register_operand" "=r")
14938 + (abs:SI (match_operand:SI 1 "register_operand" "r")))
14939 + (set (match_operand:SI 0 "register_operand" "=r")
14940 + (plus:SI (match_dup 2)
14941 + (match_operand:SI 3 "register_operand" "r")))]
14942 + "dead_or_set_p(insn, operands[2])"
14943 + "addabs %0, %3, %1"
14944 + [(set_attr "length" "4")
14945 + (set_attr "cc" "set_z")])
14946 +
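+;; A minimal C-level sketch (hypothetical name, for illustration) of code
+;; that the addabs peepholes above apply to:
+;;   int add_abs(int acc, int x) { return acc + (x < 0 ? -x : x); }
+;; When the temporary holding the absolute value dies after the add, the
+;; abs and the add can collapse into a single addabs.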
14947 +
14948 +;;=================================================================
14949 +;; Detect roundings
14950 +;;=================================================================
14951 +
14952 +(define_insn "*round"
14953 + [(set (match_operand:SI 0 "register_operand" "=r")
14954 + (ashiftrt:SI (plus:SI (match_operand:SI 1 "register_operand" "0")
14955 + (match_operand:SI 2 "immediate_operand" "i"))
14956 + (match_operand:SI 3 "immediate_operand" "i")))]
14957 + "avr32_rnd_operands(operands[2], operands[3])"
14958 +
14959 + "satrnds %0 >> %3, 31"
14960 +
14961 + [(set_attr "type" "alu_sat")
14962 + (set_attr "length" "4")]
14963 +
14964 + )
14965 +
14966 +
14967 +(define_peephole2
14968 + [(set (match_operand:SI 0 "register_operand" "")
14969 + (plus:SI (match_dup 0)
14970 + (match_operand:SI 1 "immediate_operand" "")))
14971 + (set (match_dup 0)
14972 + (ashiftrt:SI (match_dup 0)
14973 + (match_operand:SI 2 "immediate_operand" "")))]
14974 + "avr32_rnd_operands(operands[1], operands[2])"
14975 +
14976 + [(set (match_dup 0)
14977 + (ashiftrt:SI (plus:SI (match_dup 0)
14978 + (match_dup 1))
14979 + (match_dup 2)))]
14980 + )
14981 +
14982 +(define_peephole
14983 + [(set (match_operand:SI 0 "register_operand" "r")
14984 + (plus:SI (match_dup 0)
14985 + (match_operand:SI 1 "immediate_operand" "i")))
14986 + (set (match_dup 0)
14987 + (ashiftrt:SI (match_dup 0)
14988 + (match_operand:SI 2 "immediate_operand" "i")))]
14989 + "avr32_rnd_operands(operands[1], operands[2])"
14990 +
14991 + "satrnds %0 >> %2, 31"
14992 +
14993 + [(set_attr "type" "alu_sat")
14994 + (set_attr "length" "4")
14995 + (set_attr "cc" "clobber")]
14996 +
14997 + )
14998 +
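+;; A minimal C-level sketch (hypothetical name, for illustration) of the
+;; fixed-point rounding idiom detected above:
+;;   int round_q15(int x) { return (x + (1 << 14)) >> 15; }
+;; avr32_rnd_operands presumably checks for the usual rounding form, an add
+;; of 2^(n-1) followed by an arithmetic shift right by n, which the *round
+;; pattern then emits as a single satrnds.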
14999 +
15000 +
15001 +
15002 +;;=================================================================
15003 +;; Conditional Subtract
15004 +;;=================================================================
15005 +
15006 +
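+;; These peepholes match a subtract (or an add of a negatable immediate) into
+;; a scratch register, followed by a conditional move selecting between the
+;; original and the adjusted value.  When the scratch register is dead, the
+;; pair is emitted as a compare plus a single conditional sub on %1.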
15007 +(define_peephole
15008 + [(set (match_operand:SI 0 "register_operand" "")
15009 + (minus:SI (match_operand:SI 1 "register_operand" "")
15010 + (match_operand:SI 2 "immediate_operand" "")))
15011 + (set (match_dup 1)
15012 + (unspec:SI [(match_operand 5 "avr32_comparison_operator" "")
15013 + (match_dup 0)
15014 + (match_dup 1)
15015 + (match_operand 3 "general_operand" "")
15016 + (match_operand 4 "general_operand" "")]
15017 + UNSPEC_MOVSICC))]
15018 +
15019 + "(dead_or_set_p(insn, operands[0])) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'K', \"Ks08\")"
15020 +
15021 + {
15022 +
15023 + operands[5] = avr32_output_cmp(operands[5], GET_MODE(operands[3]), operands[3], operands[4]);
15024 +
15025 + return "sub%5 %1, %2";
15026 + }
15027 +
15028 + [(set_attr "length" "10")
15029 + (set_attr "cc" "clobber")]
15030 + )
15031 +
15032 +(define_peephole
15033 + [(set (match_operand:SI 0 "register_operand" "")
15034 + (plus:SI (match_operand:SI 1 "register_operand" "")
15035 + (match_operand:SI 2 "immediate_operand" "")))
15036 + (set (match_dup 1)
15037 + (unspec:SI [(match_operand 5 "avr32_comparison_operator" "")
15038 + (match_dup 0)
15039 + (match_dup 1)
15040 + (match_operand 3 "general_operand" "")
15041 + (match_operand 4 "general_operand" "")]
15042 + UNSPEC_MOVSICC))]
15043 +
15044 + "(dead_or_set_p(insn, operands[0]) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'I', \"Is08\"))"
15045 +
15046 + {
15047 + operands[5] = avr32_output_cmp(operands[5], GET_MODE(operands[3]), operands[3], operands[4]);
15048 +
15049 + return "sub%5 %1, %n2";
15050 + }
15051 + [(set_attr "length" "10")
15052 + (set_attr "cc" "clobber")]
15053 + )
15054 +
15055 +;;=================================================================
15056 +;; mcall
15057 +;;=================================================================
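+;; These patterns combine a load of a function address from the constant pool
+;; with the indirect call through that register into a single mcall via the
+;; constant-pool entry, provided the address register is dead (or reused as
+;; the call result) afterwards.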
15058 +(define_peephole
15059 + [(set (match_operand:SI 0 "register_operand" "")
15060 + (match_operand 1 "avr32_const_pool_ref_operand" ""))
15061 + (parallel [(call (mem:SI (match_dup 0))
15062 + (match_operand 2 "" ""))
15063 + (clobber (reg:SI LR_REGNUM))])]
15064 + "dead_or_set_p(insn, operands[0])"
15065 + "mcall %1"
15066 + [(set_attr "type" "call")
15067 + (set_attr "length" "4")
15068 + (set_attr "cc" "clobber")]
15069 +)
15070 +
15071 +(define_peephole
15072 + [(set (match_operand:SI 2 "register_operand" "")
15073 + (match_operand 1 "avr32_const_pool_ref_operand" ""))
15074 + (parallel [(set (match_operand 0 "register_operand" "")
15075 + (call (mem:SI (match_dup 2))
15076 + (match_operand 3 "" "")))
15077 + (clobber (reg:SI LR_REGNUM))])]
15078 + "dead_or_set_p(insn, operands[2])"
15079 + "mcall %1"
15080 + [(set_attr "type" "call")
15081 + (set_attr "length" "4")
15082 + (set_attr "cc" "call_set")]
15083 +)
15084 +
15085 +
15086 +(define_peephole2
15087 + [(set (match_operand:SI 0 "register_operand" "")
15088 + (match_operand 1 "avr32_const_pool_ref_operand" ""))
15089 + (parallel [(call (mem:SI (match_dup 0))
15090 + (match_operand 2 "" ""))
15091 + (clobber (reg:SI LR_REGNUM))])]
15092 + "peep2_reg_dead_p(2, operands[0])"
15093 + [(parallel [(call (mem:SI (match_dup 1))
15094 + (match_dup 2))
15095 + (clobber (reg:SI LR_REGNUM))])]
15096 + ""
15097 +)
15098 +
15099 +(define_peephole2
15100 + [(set (match_operand:SI 0 "register_operand" "")
15101 + (match_operand 1 "avr32_const_pool_ref_operand" ""))
15102 + (parallel [(set (match_operand 2 "register_operand" "")
15103 + (call (mem:SI (match_dup 0))
15104 + (match_operand 3 "" "")))
15105 + (clobber (reg:SI LR_REGNUM))])]
15106 + "(peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[2]) == REGNO(operands[0])))"
15107 + [(parallel [(set (match_dup 2)
15108 + (call (mem:SI (match_dup 1))
15109 + (match_dup 3)))
15110 + (clobber (reg:SI LR_REGNUM))])]
15111 + ""
15112 +)
15113 +
15114 +;;=================================================================
15115 +;; Returning a value
15116 +;;=================================================================
15117 +
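+;; These peepholes merge the final move into the return-value register with
+;; the return itself, so simple return values (a register, a small constant,
+;; or a conditional select) come out as a single retal/ret{cond} or are
+;; folded into the epilogue, instead of a separate move followed by a return.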
15118 +
15119 +(define_peephole
15120 + [(set (match_operand 0 "register_operand" "")
15121 + (match_operand 1 "register_operand" ""))
15122 + (return)]
15123 + "USE_RETURN_INSN (TRUE) && (REGNO(operands[0]) == RETVAL_REGNUM)
15124 + && (REGNO(operands[1]) != LR_REGNUM)
15125 + && (REGNO_REG_CLASS(REGNO(operands[1])) == GENERAL_REGS)"
15126 + "retal %1"
15127 + [(set_attr "type" "call")
15128 + (set_attr "length" "2")]
15129 + )
15130 +
15131 +
15132 +(define_peephole
15133 + [(set (match_operand 0 "register_operand" "r")
15134 + (match_operand 1 "immediate_operand" "i"))
15135 + (return)]
15136 + "(USE_RETURN_INSN (FALSE) && (REGNO(operands[0]) == RETVAL_REGNUM) &&
15137 + ((INTVAL(operands[1]) == -1) || (INTVAL(operands[1]) == 0) || (INTVAL(operands[1]) == 1)))"
15138 + {
15139 + avr32_output_return_instruction (TRUE, FALSE, NULL, operands[1]);
15140 + return "";
15141 + }
15142 + [(set_attr "type" "call")
15143 + (set_attr "length" "4")]
15144 + )
15145 +
15146 +(define_peephole
15147 + [(set (match_operand 0 "register_operand" "r")
15148 + (match_operand 1 "immediate_operand" "i"))
15149 + (unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
15150 + "(REGNO(operands[0]) == RETVAL_REGNUM) &&
15151 + ((INTVAL(operands[1]) == -1) || (INTVAL(operands[1]) == 0) || (INTVAL(operands[1]) == 1))"
15152 + {
15153 + avr32_output_return_instruction (FALSE, FALSE, NULL, operands[1]);
15154 + return "";
15155 + }
15156 + ; Length is absolute worst case
15157 + [(set_attr "type" "branch")
15158 + (set_attr "length" "12")]
15159 + )
15160 +
15161 +(define_peephole
15162 + [(set (match_operand 0 "register_operand" "r")
15163 + (unspec [(match_operand 1 "avr32_comparison_operator" "")
15164 + (match_operand 2 "register_immediate_operand" "rKs08")
15165 + (match_operand 3 "register_immediate_operand" "rKs08")
15166 + (match_operand 4 "register_immediate_operand" "r")
15167 + (match_operand 5 "register_immediate_operand" "rKs21")
15168 + ]
15169 + UNSPEC_MOVSICC ))
15170 + (return)]
15171 + "USE_RETURN_INSN (TRUE) && (REGNO(operands[0]) == RETVAL_REGNUM) &&
15172 + ((GET_MODE(operands[4]) == SImode) ||
15173 + ((GET_MODE(operands[4]) != SImode) && (GET_CODE(operands[5]) == REG)))"
15174 + {
15175 + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
15176 +
15177 + if ( GET_CODE(operands[2]) == REG
15178 + && GET_CODE(operands[3]) == REG
15179 + && REGNO(operands[2]) != LR_REGNUM
15180 + && REGNO(operands[3]) != LR_REGNUM ){
15181 + return "ret%1 %2\;ret%i1 %3";
15182 + } else if ( GET_CODE(operands[2]) == REG
15183 + && GET_CODE(operands[3]) == CONST_INT ){
15184 + if ( INTVAL(operands[3]) == -1
15185 + || INTVAL(operands[3]) == 0
15186 + || INTVAL(operands[3]) == 1 ){
15187 + return "ret%1 %2\;ret%i1 %d3";
15188 + } else {
15189 + return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12";
15190 + }
15191 + } else if ( GET_CODE(operands[2]) == CONST_INT
15192 + && GET_CODE(operands[3]) == REG ){
15193 + if ( INTVAL(operands[2]) == -1
15194 + || INTVAL(operands[2]) == 0
15195 + || INTVAL(operands[2]) == 1 ){
15196 + return "ret%1 %d2\;ret%i1 %3";
15197 + } else {
15198 + return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12";
15199 + }
15200 + } else {
15201 + if ( (INTVAL(operands[2]) == -1
15202 + || INTVAL(operands[2]) == 0
15203 + || INTVAL(operands[2]) == 1 )
15204 + && (INTVAL(operands[3]) == -1
15205 + || INTVAL(operands[3]) == 0
15206 + || INTVAL(operands[3]) == 1 )){
15207 + return "ret%1 %d2\;ret%i1 %d3";
15208 + } else {
15209 + return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12";
15210 + }
15211 + }
15212 + }
15213 +
15214 + [(set_attr "length" "14")
15215 + (set_attr "cc" "clobber")
15216 + (set_attr "type" "call")])
15217 +
15218 +
15219 +;;=================================================================
15220 +;; mulnhh.w
15221 +;;=================================================================
15222 +
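+;; These peepholes fold a halfword negation that only feeds a widening signed
+;; halfword multiply into the multiply itself (the mulnhh.w form), when the
+;; negated temporary is dead afterwards or overwritten by the product.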
15223 +(define_peephole2
15224 + [(set (match_operand:HI 0 "register_operand" "")
15225 + (neg:HI (match_operand:HI 1 "register_operand" "")))
15226 + (set (match_operand:SI 2 "register_operand" "")
15227 + (mult:SI
15228 + (sign_extend:SI (match_dup 0))
15229 + (sign_extend:SI (match_operand:HI 3 "register_operand" ""))))]
15230 + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[2]) == REGNO(operands[0]))"
15231 + [ (set (match_dup 2)
15232 + (mult:SI
15233 + (sign_extend:SI (neg:HI (match_dup 1)))
15234 + (sign_extend:SI (match_dup 3))))]
15235 + ""
15236 + )
15237 +
15238 +(define_peephole2
15239 + [(set (match_operand:HI 0 "register_operand" "")
15240 + (neg:HI (match_operand:HI 1 "register_operand" "")))
15241 + (set (match_operand:SI 2 "register_operand" "")
15242 + (mult:SI
15243 + (sign_extend:SI (match_operand:HI 3 "register_operand" ""))
15244 + (sign_extend:SI (match_dup 0))))]
15245 + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[2]) == REGNO(operands[0]))"
15246 + [ (set (match_dup 2)
15247 + (mult:SI
15248 + (sign_extend:SI (neg:HI (match_dup 1)))
15249 + (sign_extend:SI (match_dup 3))))]
15250 + ""
15251 + )
15252 +
15253 +
15254 +
15255 +;;=================================================================
15256 +;; sthh.w
15257 +;;=================================================================
15258 +(define_insn "vec_setv2hi"
15259 + [(set (match_operand:V2HI 0 "register_operand" "=r")
15260 + (vec_merge:V2HI
15261 + (match_dup 0)
15262 + (vec_duplicate:V2HI
15263 + (match_operand:HI 1 "register_operand" "r"))
15264 + (const_int 1)))]
15265 + ""
15266 + "bfins\t%0, %1, 16, 16"
15267 + [(set_attr "type" "alu")
15268 + (set_attr "length" "4")
15269 + (set_attr "cc" "clobber")])
15270 +
15271 +(define_insn "vec_setv2lo"
15272 + [(set (match_operand:V2HI 0 "register_operand" "+r")
15273 + (vec_merge:V2HI
15274 + (match_dup 0)
15275 + (vec_duplicate:V2HI
15276 + (match_operand:HI 1 "register_operand" "r"))
15277 + (const_int 2)))]
15278 + ""
15279 + "bfins\t%0, %1, 0, 16"
15280 + [(set_attr "type" "alu")
15281 + (set_attr "length" "4")
15282 + (set_attr "cc" "clobber")])
15283 +
15284 +(define_expand "vec_setv2"
15285 + [(set (match_operand:V2HI 0 "register_operand" "")
15286 + (vec_merge:V2HI
15287 + (match_dup 0)
15288 + (vec_duplicate:V2HI
15289 + (match_operand:HI 1 "register_operand" ""))
15290 + (match_operand 2 "immediate_operand" "")))]
15291 + ""
15292 + { operands[2] = GEN_INT(INTVAL(operands[2]) + 1); }
15293 + )
15294 +
15295 +(define_insn "vec_extractv2hi"
15296 + [(set (match_operand:HI 0 "register_operand" "=r")
15297 + (vec_select:HI
15298 + (match_operand:V2HI 1 "register_operand" "r")
15299 + (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
15300 + ""
15301 + {
15302 + if ( INTVAL(operands[2]) == 0 )
15303 + return "bfextu\t%0, %1, 16, 16";
15304 + else
15305 + return "bfextu\t%0, %1, 0, 16";
15306 + }
15307 + [(set_attr "type" "alu")
15308 + (set_attr "length" "4")
15309 + (set_attr "cc" "clobber")])
15310 +
15311 +(define_insn "vec_extractv4qi"
15312 + [(set (match_operand:QI 0 "register_operand" "=r")
15313 + (vec_select:QI
15314 + (match_operand:V4QI 1 "register_operand" "r")
15315 + (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
15316 + ""
15317 + {
15318 + switch ( INTVAL(operands[2]) ){
15319 + case 0:
15320 + return "bfextu\t%0, %1, 24, 8";
15321 + case 1:
15322 + return "bfextu\t%0, %1, 16, 8";
15323 + case 2:
15324 + return "bfextu\t%0, %1, 8, 8";
15325 + case 3:
15326 + return "bfextu\t%0, %1, 0, 8";
15327 + default:
15328 + abort();
15329 + }
15330 + }
15331 + [(set_attr "type" "alu")
15332 + (set_attr "length" "4")
15333 + (set_attr "cc" "clobber")])
15334 +
15335 +
15336 +(define_insn "concatv2hi"
15337 + [(set (match_operand:V2HI 0 "register_operand" "=r, r, r")
15338 + (vec_concat:V2HI
15339 + (match_operand:HI 1 "register_operand" "r, r, 0")
15340 + (match_operand:HI 2 "register_operand" "r, 0, r")))]
15341 + ""
15342 + "@
15343 + mov\t%0, %1\;bfins\t%0, %2, 0, 16
15344 + bfins\t%0, %2, 0, 16
15345 + bfins\t%0, %1, 16, 16"
15346 + [(set_attr "length" "6, 4, 4")
15347 + (set_attr "type" "alu")])
15348 +
15349 +;(define_peephole2
15350 +; [(set (match_operand:HI 0 "register_operand" "r")
15351 +; (plus:HI (match_operand:HI 3 "register_operand" "r")
15352 +; (match_operand:HI 4 "register_operand" "r")))
15353 +; (set (match_operand:HI 1 "register_operand" "r")
15354 +; (minus:HI (match_dup 3)
15355 +; (match_dup 4)))]
15356 +; "REGNO(operands[0]) != REGNO(operands[3])"
15357 +; [(set (match_dup 2)
15358 +; (vec_concat:V2HI
15359 +; (minus:HI (match_dup 3)
15360 +; (match_dup 4))
15361 +; (plus:HI (match_dup 3) (match_dup 4))))
15362 +; (set (match_dup 1) (vec_select:HI (match_dup 2)
15363 +; (parallel [(const_int 0)])))]
15364 +;
15365 +; "operands[2] = gen_rtx_REG(V2HImode, REGNO(operands[0]));"
15366 +; )
15367 +;
15368 +;(define_peephole2
15369 +; [(set (match_operand:HI 0 "register_operand" "r")
15370 +; (minus:HI (match_operand:HI 3 "register_operand" "r")
15371 +; (match_operand:HI 4 "register_operand" "r")))
15372 +; (set (match_operand:HI 1 "register_operand" "r")
15373 +; (plus:HI (match_dup 3)
15374 +; (match_dup 4)))]
15375 +; "REGNO(operands[0]) != REGNO(operands[3])"
15376 +; [(set (match_dup 2)
15377 +; (vec_concat:V2HI
15378 +; (plus:HI (match_dup 3)
15379 +; (match_dup 4))
15380 +; (minus:HI (match_dup 3) (match_dup 4))))
15381 +; (set (match_dup 1) (vec_select:HI (match_dup 2)
15382 +; (parallel [(const_int 0)])))]
15383 +;
15384 +; "operands[2] = gen_rtx_REG(V2HImode, REGNO(operands[0]));"
15385 +; )
15386 +
15387 +
15388 +;(define_peephole2
15389 +; [(match_scratch:V2HI 5 "r")
15390 +; (set (mem:HI (plus:SI (match_operand:SI 0 "register_operand" "")
15391 +; (match_operand:HI 1 "immediate_operand" "")))
15392 +; (match_operand:HI 2 "register_operand" "r"))
15393 +; (set (mem:HI (plus:SI (match_dup 0)
15394 +; (match_operand:HI 3 "immediate_operand" "")))
15395 +; (match_operand:HI 4 "register_operand" "r"))]
15396 +; "(GET_CODE(operands[1]) == CONST_INT) && (GET_CODE(operands[3]) == CONST_INT)
15397 +; && (INTVAL(operands[3]) == (INTVAL(operands[1]) + 2))"
15398 +;
15399 +; [(set (match_dup 5)
15400 +; (vec_concat:V2HI
15401 +; (match_dup 2)
15402 +; (match_dup 4)))
15403 +; (set (mem:V2HI (plus:SI (match_dup 0) (match_dup 1)))
15404 +; (match_dup 5))]
15405 +; ""
15406 +; )
15407 +;
15408 +
15409 +;(define_insn "sthh_w"
15410 +; [(set (match_operand:V2HI 0 "avr32_sthh_w_memory_operand" "m")
15411 +; (vec_concat:V2HI
15412 +; (vec_select:HI (match_operand:V2HI 1 "register_operand" "r")
15413 +; (parallel [(match_operand 3 "immediate_operand" "i")]))
15414 +; (vec_select:HI (match_operand:V2HI 2 "register_operand" "r")
15415 +; (parallel [(match_operand 4 "immediate_operand" "i")]))))]
15416 +; "MEM_ALIGN(operands[0]) >= 32"
15417 +; "sthh.w\t%0, %1:%h3, %2:%h4"
15418 +; [(set_attr "length" "4")
15419 +; (set_attr "type" "store")])
15420 +;
15421 +;(define_peephole2
15422 +; [(set (mem:HI (plus:SI (match_operand:SI 0 "register_operand" "")
15423 +; (match_operand:HI 1 "immediate_operand" "")))
15424 +; (match_operand:HI 2 "register_operand" "r"))
15425 +; (set (mem:HI (plus:SI (match_dup 0)
15426 +; (match_operand:HI 3 "avr32_sthh_operand" "")))
15427 +; (match_operand:HI 4 "register_operand" "r"))]
15428 +; "(GET_CODE(operands[1]) == CONST_INT) && (GET_CODE(operands[3]) == CONST_INT)
15429 +; && (INTVAL(operands[3]) == (INTVAL(operands[1]) - 2))"
15430 +;
15431 +; [(parallel [(set (mem:HI (plus:SI (match_dup 0)
15432 +; (match_dup 3)))
15433 +; (match_dup 4))
15434 +; (set (mem:HI (plus:SI (match_dup 0)
15435 +; (plus:SI (match_dup 3) (const_int 2))))
15436 +; (match_dup 2))])]
15437 +; ""
15438 +; )
15439 +
15440 +
15441 +;; Load the SIMD description
15442 +(include "simd.md")
15443 +
15444 +;; Load the FP coprocessor patterns
15445 +(include "fpcp.md")
15446 diff -Nur gcc-4.1.2/gcc/config/avr32/avr32-modes.def gcc-4.1.2-owrt/gcc/config/avr32/avr32-modes.def
15447 --- gcc-4.1.2/gcc/config/avr32/avr32-modes.def 1970-01-01 01:00:00.000000000 +0100
15448 +++ gcc-4.1.2-owrt/gcc/config/avr32/avr32-modes.def 2007-05-24 12:03:28.000000000 +0200
15449 @@ -0,0 +1 @@
15450 +VECTOR_MODES (INT, 4); /* V4QI V2HI */
15451 diff -Nur gcc-4.1.2/gcc/config/avr32/avr32.opt gcc-4.1.2-owrt/gcc/config/avr32/avr32.opt
15452 --- gcc-4.1.2/gcc/config/avr32/avr32.opt 1970-01-01 01:00:00.000000000 +0100
15453 +++ gcc-4.1.2-owrt/gcc/config/avr32/avr32.opt 2007-05-24 12:03:28.000000000 +0200
15454 @@ -0,0 +1,78 @@
15455 +; Options for the ATMEL AVR32 port of the compiler.
15456 +
15457 +; Copyright 2007 Atmel Corporation.
15458 +;
15459 +; This file is part of GCC.
15460 +;
15461 +; GCC is free software; you can redistribute it and/or modify it under
15462 +; the terms of the GNU General Public License as published by the Free
15463 +; Software Foundation; either version 2, or (at your option) any later
15464 +; version.
15465 +;
15466 +; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15467 +; WARRANTY; without even the implied warranty of MERCHANTABILITY or
15468 +; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15469 +; for more details.
15470 +;
15471 +; You should have received a copy of the GNU General Public License
15472 +; along with GCC; see the file COPYING. If not, write to the Free
15473 +; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
15474 +; 02110-1301, USA.
15475 +
15476 +muse-rodata-section
15477 +Target Report Mask(USE_RODATA_SECTION)
15478 +Put read-only data in the .rodata section instead of the .text section.
15479 +
15480 +mhard-float
15481 +Target Report Mask(HARD_FLOAT)
15482 +Use floating point coprocessor instructions.
15483 +
15484 +msoft-float
15485 +Target Report InverseMask(HARD_FLOAT, SOFT_FLOAT)
15486 +Use software floating-point library for floating-point operations.
15487 +
15488 +force-double-align
15489 +Target Report RejectNegative Mask(FORCE_DOUBLE_ALIGN)
15490 +Force double-word alignment for double-word memory accesses.
15491 +
15492 +mno-init-got
15493 +Target Report RejectNegative Mask(NO_INIT_GOT)
15494 +Do not initialize GOT register before using it when compiling PIC code.
15495 +
15496 +mrelax
15497 +Target Report Mask(RELAX)
15498 +Let the invoked assembler and linker perform relaxing (enabled by default when the optimization level is >1).
15499 +
15500 +mno-reorg-opt
15501 +Target Report RejectNegative Mask(NO_REORG_OPT)
15502 +Do not perform machine dependent optimizations in reorg stage.
15503 +
15504 +mmd-reorg-opt
15505 +Target Report RejectNegative InverseMask(NO_REORG_OPT,MD_REORG_OPTIMIZATION)
15506 +Perform machine dependent optimizations in reorg stage.
15507 +
15508 +masm-addr-pseudos
15509 +Target Report RejectNegative InverseMask(NO_ASM_ADDR_PSEUDOS, HAS_ASM_ADDR_PSEUDOS)
15510 +Use the assembler pseudo-instructions lda.w and call for handling direct addresses (enabled by default).
15511 +
15512 +mno-asm-addr-pseudos
15513 +Target Report RejectNegative Mask(NO_ASM_ADDR_PSEUDOS)
15514 +Do not use assembler pseudo-instructions lda.w and call for handling direct addresses.
15515 +
15516 +mno-pic
15517 +Target Report RejectNegative Mask(NO_PIC)
15518 +Do not emit position-independent code (this will break dynamic linking).
15519 +
15520 +mpart=
15521 +Target Report RejectNegative Joined Var(avr32_part_name)
15522 +Specify the AVR32 part name.
15523 +
15524 +mcpu=
15525 +Target Report RejectNegative Joined Undocumented Var(avr32_part_name)
15526 +Specify the AVR32 part name (deprecated).
15527 +
15528 +march=
15529 +Target Report RejectNegative Joined Var(avr32_arch_name)
15530 +Specify the AVR32 architecture name.
15531 +
15532 +
15533 diff -Nur gcc-4.1.2/gcc/config/avr32/avr32-protos.h gcc-4.1.2-owrt/gcc/config/avr32/avr32-protos.h
15534 --- gcc-4.1.2/gcc/config/avr32/avr32-protos.h 1970-01-01 01:00:00.000000000 +0100
15535 +++ gcc-4.1.2-owrt/gcc/config/avr32/avr32-protos.h 2007-05-24 12:03:28.000000000 +0200
15536 @@ -0,0 +1,175 @@
15537 +/*
15538 + Prototypes for exported functions defined in avr32.c
15539 + Copyright 2003-2006 Atmel Corporation.
15540 +
15541 + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
15542 + Initial porting by Anders Ådland.
15543 +
15544 + This file is part of GCC.
15545 +
15546 + This program is free software; you can redistribute it and/or modify
15547 + it under the terms of the GNU General Public License as published by
15548 + the Free Software Foundation; either version 2 of the License, or
15549 + (at your option) any later version.
15550 +
15551 + This program is distributed in the hope that it will be useful,
15552 + but WITHOUT ANY WARRANTY; without even the implied warranty of
15553 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15554 + GNU General Public License for more details.
15555 +
15556 + You should have received a copy of the GNU General Public License
15557 + along with this program; if not, write to the Free Software
15558 + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
15559 +
15560 +
15561 +#ifndef AVR32_PROTOS_H
15562 +#define AVR32_PROTOS_H
15563 +
15564 +extern const int swap_reg[];
15565 +
15566 +extern int avr32_valid_macmac_bypass (rtx, rtx);
15567 +extern int avr32_valid_mulmac_bypass (rtx, rtx);
15568 +
15569 +extern int avr32_decode_lcomm_symbol_offset (rtx, int *);
15570 +extern void avr32_encode_lcomm_symbol_offset (tree, char *, int);
15571 +
15572 +extern const char *avr32_strip_name_encoding (const char *);
15573 +
15574 +extern rtx avr32_get_note_reg_equiv (rtx insn);
15575 +
15576 +extern int avr32_use_return_insn (int iscond);
15577 +
15578 +extern void avr32_make_reglist16 (int reglist16_vect, char *reglist16_string);
15579 +
15580 +extern void avr32_make_reglist8 (int reglist8_vect, char *reglist8_string);
15581 +extern void avr32_make_fp_reglist_w (int reglist_mask, char *reglist_string);
15582 +extern void avr32_make_fp_reglist_d (int reglist_mask, char *reglist_string);
15583 +
15584 +extern void avr32_output_return_instruction (int single_ret_inst,
15585 + int iscond, rtx cond,
15586 + rtx r12_imm);
15587 +extern void avr32_expand_prologue (void);
15588 +extern void avr32_set_return_address (rtx source);
15589 +
15590 +extern int avr32_hard_regno_mode_ok (int regno, enum machine_mode mode);
15591 +extern int avr32_extra_constraint_s (rtx value, const int strict);
15592 +extern int avr32_eh_return_data_regno (const int n);
15593 +extern int avr32_initial_elimination_offset (const int from, const int to);
15594 +extern rtx avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode,
15595 + tree type, int named);
15596 +extern void avr32_init_cumulative_args (CUMULATIVE_ARGS * cum, tree fntype,
15597 + rtx libname, tree fndecl);
15598 +extern void avr32_function_arg_advance (CUMULATIVE_ARGS * cum,
15599 + enum machine_mode mode,
15600 + tree type, int named);
15601 +#ifdef ARGS_SIZE_RTX
15602 +/* expr.h defines ARGS_SIZE_RTX and `enum direction'. */
15603 +extern enum direction avr32_function_arg_padding (enum machine_mode mode,
15604 + tree type);
15605 +#endif /* ARGS_SIZE_RTX */
15606 +extern rtx avr32_function_value (tree valtype, tree func);
15607 +extern rtx avr32_libcall_value (enum machine_mode mode);
15608 +extern int avr32_sched_use_dfa_pipeline_interface (void);
15609 +extern bool avr32_return_in_memory (tree type, tree fntype);
15610 +extern void avr32_regs_to_save (char *operand);
15611 +extern void avr32_target_asm_function_prologue (FILE * file,
15612 + HOST_WIDE_INT size);
15613 +extern void avr32_target_asm_function_epilogue (FILE * file,
15614 + HOST_WIDE_INT size);
15615 +extern void avr32_trampoline_template (FILE * file);
15616 +extern void avr32_initialize_trampoline (rtx addr, rtx fnaddr,
15617 + rtx static_chain);
15618 +extern int avr32_legitimate_address (enum machine_mode mode, rtx x,
15619 + int strict);
15620 +extern int avr32_legitimate_constant_p (rtx x);
15621 +
15622 +extern int avr32_legitimate_pic_operand_p (rtx x);
15623 +
15624 +extern rtx avr32_find_symbol (rtx x);
15625 +extern void avr32_select_section (rtx exp, int reloc, int align);
15626 +extern void avr32_encode_section_info (tree decl, rtx rtl, int first);
15627 +extern void avr32_asm_file_end (FILE * stream);
15628 +extern void avr32_asm_output_ascii (FILE * stream, char *ptr, int len);
15629 +extern void avr32_asm_output_common (FILE * stream, const char *name,
15630 + int size, int rounded);
15631 +extern void avr32_asm_output_label (FILE * stream, const char *name);
15632 +extern void avr32_asm_declare_object_name (FILE * stream, char *name,
15633 + tree decl);
15634 +extern void avr32_asm_globalize_label (FILE * stream, const char *name);
15635 +extern void avr32_asm_weaken_label (FILE * stream, const char *name);
15636 +extern void avr32_asm_output_external (FILE * stream, tree decl,
15637 + const char *name);
15638 +extern void avr32_asm_output_external_libcall (FILE * stream, rtx symref);
15639 +extern void avr32_asm_output_labelref (FILE * stream, const char *name);
15640 +extern void avr32_notice_update_cc (rtx exp, rtx insn);
15641 +extern void avr32_print_operand (FILE * stream, rtx x, int code);
15642 +extern void avr32_print_operand_address (FILE * stream, rtx x);
15643 +
15644 +extern int avr32_symbol (rtx x);
15645 +
15646 +extern void avr32_select_rtx_section (enum machine_mode mode, rtx x,
15647 + unsigned HOST_WIDE_INT align);
15648 +
15649 +extern int avr32_load_multiple_operation (rtx op, enum machine_mode mode);
15650 +extern int avr32_store_multiple_operation (rtx op, enum machine_mode mode);
15651 +
15652 +extern int avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c,
15653 + const char *str);
15654 +
15655 +extern bool avr32_cannot_force_const_mem (rtx x);
15656 +
15657 +extern void avr32_init_builtins (void);
15658 +
15659 +extern rtx avr32_expand_builtin (tree exp, rtx target, rtx subtarget,
15660 + enum machine_mode mode, int ignore);
15661 +
15662 +extern bool avr32_must_pass_in_stack (enum machine_mode mode, tree type);
15663 +
15664 +extern bool avr32_strict_argument_naming (CUMULATIVE_ARGS * ca);
15665 +
15666 +extern bool avr32_pass_by_reference (CUMULATIVE_ARGS * cum,
15667 + enum machine_mode mode,
15668 + tree type, bool named);
15669 +
15670 +extern rtx avr32_gen_load_multiple (rtx * regs, int count, rtx from,
15671 + int write_back, int in_struct_p,
15672 + int scalar_p);
15673 +extern rtx avr32_gen_store_multiple (rtx * regs, int count, rtx to,
15674 + int in_struct_p, int scalar_p);
15675 +extern int avr32_gen_movmemsi (rtx * operands);
15676 +
15677 +extern int avr32_rnd_operands (rtx add, rtx shift);
15678 +extern int avr32_adjust_insn_length (rtx insn, int length);
15679 +
15680 +extern int symbol_mentioned_p (rtx x);
15681 +extern int label_mentioned_p (rtx x);
15682 +extern rtx legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg);
15683 +extern int avr32_address_register_rtx_p (rtx x, int strict_p);
15684 +extern int avr32_legitimate_index_p (enum machine_mode mode, rtx index,
15685 + int strict_p);
15686 +
15687 +extern int avr32_const_double_immediate (rtx value);
15688 +extern void avr32_init_expanders (void);
15689 +extern rtx avr32_return_addr (int count, rtx frame);
15690 +extern bool avr32_got_mentioned_p (rtx addr);
15691 +
15692 +extern void avr32_final_prescan_insn (rtx insn, rtx * opvec, int noperands);
15693 +
15694 +extern int avr32_expand_movcc (enum machine_mode mode, rtx operands[]);
15695 +extern int avr32_expand_addcc (enum machine_mode mode, rtx operands[]);
15696 +#ifdef RTX_CODE
15697 +extern int avr32_expand_scc (RTX_CODE cond, rtx * operands);
15698 +#endif
15699 +
15700 +extern int avr32_store_bypass (rtx insn_out, rtx insn_in);
15701 +extern int avr32_mul_waw_bypass (rtx insn_out, rtx insn_in);
15702 +extern int avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in);
15703 +extern int avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in);
15704 +extern rtx avr32_output_cmp (rtx cond, enum machine_mode mode,
15705 + rtx op0, rtx op1);
15706 +
15707 +rtx get_next_insn_cond (rtx cur_insn);
15708 +int set_next_insn_cond (rtx cur_insn, rtx cond);
15709 +void avr32_override_options (void);
15710 +
15711 +#endif /* AVR32_PROTOS_H */
15712 diff -Nur gcc-4.1.2/gcc/config/avr32/crti.asm gcc-4.1.2-owrt/gcc/config/avr32/crti.asm
15713 --- gcc-4.1.2/gcc/config/avr32/crti.asm 1970-01-01 01:00:00.000000000 +0100
15714 +++ gcc-4.1.2-owrt/gcc/config/avr32/crti.asm 2007-05-24 12:03:28.000000000 +0200
15715 @@ -0,0 +1,64 @@
15716 +/*
15717 + Init/fini stuff for AVR32.
15718 + Copyright 2003-2006 Atmel Corporation.
15719 +
15720 + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
15721 +
15722 + This file is part of GCC.
15723 +
15724 + This program is free software; you can redistribute it and/or modify
15725 + it under the terms of the GNU General Public License as published by
15726 + the Free Software Foundation; either version 2 of the License, or
15727 + (at your option) any later version.
15728 +
15729 + This program is distributed in the hope that it will be useful,
15730 + but WITHOUT ANY WARRANTY; without even the implied warranty of
15731 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15732 + GNU General Public License for more details.
15733 +
15734 + You should have received a copy of the GNU General Public License
15735 + along with this program; if not, write to the Free Software
15736 + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
15737 +
15738 +
15739 +/* The code in sections .init and .fini is supposed to be a single
15740 + regular function. The function in .init is called directly from
15741 + start in crt1.asm. The function in .fini is atexit()ed in crt1.asm
15742 + too.
15743 +
15744 + crti.asm contributes the prologue of a function to these sections,
15745 + and crtn.asm comes up the epilogue. STARTFILE_SPEC should list
15746 + crti.o before any other object files that might add code to .init
15747 + or .fini sections, and ENDFILE_SPEC should list crtn.o after any
15748 + such object files. */
15749 +
15750 + .file "crti.asm"
15751 +
15752 + .section ".init"
15753 +/* Just load the GOT */
15754 + .align 2
15755 + .global _init
15756 +_init:
15757 + stm --sp, r6, lr
15758 + lddpc r6, 1f
15759 +0:
15760 + rsub r6, pc
15761 + rjmp 2f
15762 + .align 2
15763 +1: .long 0b - _GLOBAL_OFFSET_TABLE_
15764 +2:
15765 +
15766 + .section ".fini"
15767 +/* Just load the GOT */
15768 + .align 2
15769 + .global _fini
15770 +_fini:
15771 + stm --sp, r6, lr
15772 + lddpc r6, 1f
15773 +0:
15774 + rsub r6, pc
15775 + rjmp 2f
15776 + .align 2
15777 +1: .long 0b - _GLOBAL_OFFSET_TABLE_
15778 +2:
15779 +
15780 diff -Nur gcc-4.1.2/gcc/config/avr32/crtn.asm gcc-4.1.2-owrt/gcc/config/avr32/crtn.asm
15781 --- gcc-4.1.2/gcc/config/avr32/crtn.asm 1970-01-01 01:00:00.000000000 +0100
15782 +++ gcc-4.1.2-owrt/gcc/config/avr32/crtn.asm 2007-05-24 12:03:28.000000000 +0200
15783 @@ -0,0 +1,44 @@
15784 +/* Copyright (C) 2001 Free Software Foundation, Inc.
15785 + Written By Nick Clifton
15786 +
15787 + This file is free software; you can redistribute it and/or modify it
15788 + under the terms of the GNU General Public License as published by the
15789 + Free Software Foundation; either version 2, or (at your option) any
15790 + later version.
15791 +
15792 + In addition to the permissions in the GNU General Public License, the
15793 + Free Software Foundation gives you unlimited permission to link the
15794 + compiled version of this file with other programs, and to distribute
15795 + those programs without any restriction coming from the use of this
15796 + file. (The General Public License restrictions do apply in other
15797 + respects; for example, they cover modification of the file, and
15798 + distribution when not linked into another program.)
15799 +
15800 + This file is distributed in the hope that it will be useful, but
15801 + WITHOUT ANY WARRANTY; without even the implied warranty of
15802 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15803 + General Public License for more details.
15804 +
15805 + You should have received a copy of the GNU General Public License
15806 + along with this program; see the file COPYING. If not, write to
15807 + the Free Software Foundation, 59 Temple Place - Suite 330,
15808 + Boston, MA 02111-1307, USA.
15809 +
15810 + As a special exception, if you link this library with files
15811 + compiled with GCC to produce an executable, this does not cause
15812 + the resulting executable to be covered by the GNU General Public License.
15813 + This exception does not however invalidate any other reasons why
15814 + the executable file might be covered by the GNU General Public License.
15815 +*/
15816 +
15817 +
15818 +
15819 +
15820 + .file "crtn.asm"
15821 +
15822 + .section ".init"
15823 + ldm sp++, r6, pc
15824 +
15825 + .section ".fini"
15826 + ldm sp++, r6, pc
15827 +
15828 diff -Nur gcc-4.1.2/gcc/config/avr32/fpcp.md gcc-4.1.2-owrt/gcc/config/avr32/fpcp.md
15829 --- gcc-4.1.2/gcc/config/avr32/fpcp.md 1970-01-01 01:00:00.000000000 +0100
15830 +++ gcc-4.1.2-owrt/gcc/config/avr32/fpcp.md 2007-05-24 12:03:28.000000000 +0200
15831 @@ -0,0 +1,551 @@
15832 +;; AVR32 machine description file for Floating-Point instructions.
15833 +;; Copyright 2003-2006 Atmel Corporation.
15834 +;;
15835 +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
15836 +;;
15837 +;; This file is part of GCC.
15838 +;;
15839 +;; This program is free software; you can redistribute it and/or modify
15840 +;; it under the terms of the GNU General Public License as published by
15841 +;; the Free Software Foundation; either version 2 of the License, or
15842 +;; (at your option) any later version.
15843 +;;
15844 +;; This program is distributed in the hope that it will be useful,
15845 +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
15846 +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15847 +;; GNU General Public License for more details.
15848 +;;
15849 +;; You should have received a copy of the GNU General Public License
15850 +;; along with this program; if not, write to the Free Software
15851 +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
15852 +
15853 +;; -*- Mode: Scheme -*-
15854 +
15855 +;;******************************************************************************
15856 +;; Automaton pipeline description for floating-point coprocessor insns
15857 +;;******************************************************************************
15858 +(define_cpu_unit "fid,fm1,fm2,fm3,fm4,fwb,fcmp,fcast" "avr32_ap")
15859 +
15860 +(define_insn_reservation "fmv_op" 1
15861 + (and (eq_attr "pipeline" "ap")
15862 + (eq_attr "type" "fmv"))
15863 + "is,da,d,fid,fwb")
15864 +
15865 +(define_insn_reservation "fmul_op" 5
15866 + (and (eq_attr "pipeline" "ap")
15867 + (eq_attr "type" "fmul"))
15868 + "is,da,d,fid,fm1,fm2,fm3,fm4,fwb")
15869 +
15870 +(define_insn_reservation "fcmps_op" 1
15871 + (and (eq_attr "pipeline" "ap")
15872 + (eq_attr "type" "fcmps"))
15873 + "is,da,d,fid,fcmp")
15874 +
15875 +(define_insn_reservation "fcmpd_op" 2
15876 + (and (eq_attr "pipeline" "ap")
15877 + (eq_attr "type" "fcmpd"))
15878 + "is,da,d,fid*2,fcmp")
15879 +
15880 +(define_insn_reservation "fcast_op" 3
15881 + (and (eq_attr "pipeline" "ap")
15882 + (eq_attr "type" "fcast"))
15883 + "is,da,d,fid,fcmp,fcast,fwb")
15884 +
15885 +(define_insn_reservation "fmvcpu_op" 2
15886 + (and (eq_attr "pipeline" "ap")
15887 + (eq_attr "type" "fmvcpu"))
15888 + "is,da,d")
15889 +
15890 +(define_insn_reservation "fldd_op" 1
15891 + (and (eq_attr "pipeline" "ap")
15892 + (eq_attr "type" "fldd"))
15893 + "is,da,d,fwb")
15894 +
15895 +(define_insn_reservation "flds_op" 1
15896 + (and (eq_attr "pipeline" "ap")
15897 + (eq_attr "type" "flds"))
15898 + "is,da,d,fwb")
15899 +
15900 +(define_insn_reservation "fsts_op" 0
15901 + (and (eq_attr "pipeline" "ap")
15902 + (eq_attr "type" "fsts"))
15903 + "is,da*2,d")
15904 +
15905 +(define_insn_reservation "fstd_op" 0
15906 + (and (eq_attr "pipeline" "ap")
15907 + (eq_attr "type" "fstd"))
15908 + "is,da*2,d")
15909 +
15910 +
15911 +(define_insn "*movsf_fpcp"
15912 + [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,r,f,m,r,r,r,m")
15913 + (match_operand:SF 1 "general_operand" " f,r,f,m,f,r,G,m,r"))]
15914 + "TARGET_HARD_FLOAT"
15915 + "@
15916 + fmov.s\t%0, %1
15917 + fmov.s\t%0, %1
15918 + fmov.s\t%0, %1
15919 + fld.s\t%0, %1
15920 + fst.s\t%0, %1
15921 + mov\t%0, %1
15922 + mov\t%0, %1
15923 + ld.w\t%0, %1
15924 + st.w\t%0, %1"
15925 + [(set_attr "length" "4,4,4,4,4,2,4,4,4")
15926 + (set_attr "type" "fmv,flds,fmvcpu,flds,fsts,alu,alu,load,store")])
15927 +
15928 +(define_insn_and_split "*movdf_fpcp"
15929 + [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,r,f,m,r,r,m")
15930 + (match_operand:DF 1 "general_operand" " f,r,f,m,f,r,m,r"))]
15931 + "TARGET_HARD_FLOAT"
15932 + "@
15933 + fmov.d\t%0, %1
15934 + fmov.d\t%0, %1
15935 + fmov.d\t%0, %1
15936 + fld.d\t%0, %1
15937 + fst.d\t%0, %1
15938 + mov\t%0, %1\;mov\t%m0, %m1
15939 + ld.d\t%0, %1
15940 + st.d\t%0, %1"
15941 +
15942 + "TARGET_HARD_FLOAT
15943 + && reload_completed
15944 + && (REG_P(operands[0]) && (REGNO_REG_CLASS(REGNO(operands[0])) == GENERAL_REGS))
15945 + && (REG_P(operands[1]) && (REGNO_REG_CLASS(REGNO(operands[1])) == GENERAL_REGS))"
15946 + [(set (match_dup 0) (match_dup 1))
15947 + (set (match_dup 2) (match_dup 3))]
15948 + "
15949 + {
15950 + operands[2] = gen_highpart (SImode, operands[0]);
15951 + operands[0] = gen_lowpart (SImode, operands[0]);
15952 + operands[3] = gen_highpart(SImode, operands[1]);
15953 + operands[1] = gen_lowpart(SImode, operands[1]);
15954 + }
15955 + "
15956 +
15957 + [(set_attr "length" "4,4,4,4,4,4,4,4")
15958 + (set_attr "type" "fmv,fldd,fmvcpu,fldd,fstd,alu2,load2,store2")])
15959 +
15960 +
15961 +(define_insn "mulsf3"
15962 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
15963 + (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
15964 + (match_operand:SF 2 "avr32_fp_register_operand" "f")))]
15965 + "TARGET_HARD_FLOAT"
15966 + "fmul.s\t%0, %1, %2"
15967 + [(set_attr "length" "4")
15968 + (set_attr "type" "fmul")])
15969 +
15970 +(define_insn "nmulsf3"
15971 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
15972 + (neg:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
15973 + (match_operand:SF 2 "avr32_fp_register_operand" "f"))))]
15974 + "TARGET_HARD_FLOAT"
15975 + "fnmul.s\t%0, %1, %2"
15976 + [(set_attr "length" "4")
15977 + (set_attr "type" "fmul")])
15978 +
15979 +(define_peephole2
15980 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "")
15981 + (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "")
15982 + (match_operand:SF 2 "avr32_fp_register_operand" "")))
15983 + (set (match_operand:SF 3 "avr32_fp_register_operand" "")
15984 + (neg:SF (match_dup 0)))]
15985 + "TARGET_HARD_FLOAT &&
15986 + (peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[3]) == REGNO(operands[0])))"
15987 + [(set (match_dup 3)
15988 + (neg:SF (mult:SF (match_dup 1)
15989 + (match_dup 2))))]
15990 +)
15991 +
15992 +
15993 +(define_insn "macsf3"
15994 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
15995 + (plus:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
15996 + (match_operand:SF 2 "avr32_fp_register_operand" "f"))
15997 + (match_operand:SF 3 "avr32_fp_register_operand" "0")))]
15998 + "TARGET_HARD_FLOAT"
15999 + "fmac.s\t%0, %1, %2"
16000 + [(set_attr "length" "4")
16001 + (set_attr "type" "fmul")])
16002 +
16003 +(define_insn "nmacsf3"
16004 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
16005 + (plus:SF (neg:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
16006 + (match_operand:SF 2 "avr32_fp_register_operand" "f")))
16007 + (match_operand:SF 3 "avr32_fp_register_operand" "0")))]
16008 + "TARGET_HARD_FLOAT"
16009 + "fnmac.s\t%0, %1, %2"
16010 + [(set_attr "length" "4")
16011 + (set_attr "type" "fmul")])
16012 +
16013 +(define_peephole2
16014 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "")
16015 + (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "")
16016 + (match_operand:SF 2 "avr32_fp_register_operand" "")))
16017 + (set (match_operand:SF 3 "avr32_fp_register_operand" "")
16018 + (minus:SF
16019 + (match_dup 3)
16020 + (match_dup 0)))]
16021 + "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])"
16022 + [(set (match_dup 3)
16023 + (plus:SF (neg:SF (mult:SF (match_dup 1)
16024 + (match_dup 2)))
16025 + (match_dup 3)))]
16026 +)
16027 +
16028 +
16029 +(define_insn "msubacsf3"
16030 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
16031 + (minus:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
16032 + (match_operand:SF 2 "avr32_fp_register_operand" "f"))
16033 + (match_operand:SF 3 "avr32_fp_register_operand" "0")))]
16034 + "TARGET_HARD_FLOAT"
16035 + "fmsc.s\t%0, %1, %2"
16036 + [(set_attr "length" "4")
16037 + (set_attr "type" "fmul")])
16038 +
16039 +(define_peephole2
16040 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "")
16041 + (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "")
16042 + (match_operand:SF 2 "avr32_fp_register_operand" "")))
16043 + (set (match_operand:SF 3 "avr32_fp_register_operand" "")
16044 + (minus:SF
16045 + (match_dup 0)
16046 + (match_dup 3)))]
16047 + "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])"
16048 + [(set (match_dup 3)
16049 + (minus:SF (mult:SF (match_dup 1)
16050 + (match_dup 2))
16051 + (match_dup 3)))]
16052 +)
16053 +
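+;; A minimal C-level sketch (hypothetical names, for illustration) of the
+;; fused single-precision forms above; whether plain C contracts to them
+;; depends on how the multiply result is used:
+;;   float fmsc_like(float a, float b, float c)  { return a * b - c; }
+;;   float fnmac_like(float a, float b, float c) { return c - a * b; }
+;; When the intermediate product register dies, the peepholes rewrite the
+;; fmul.s plus subtract into a single fmsc.s / fnmac.s.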
16054 +(define_insn "nmsubacsf3"
16055 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
16056 + (minus:SF (neg:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
16057 + (match_operand:SF 2 "avr32_fp_register_operand" "f")))
16058 + (match_operand:SF 3 "avr32_fp_register_operand" "0")))]
16059 + "TARGET_HARD_FLOAT"
16060 + "fnmsc.s\t%0, %1, %2"
16061 + [(set_attr "length" "4")
16062 + (set_attr "type" "fmul")])
16063 +
16064 +
16065 +
16066 +(define_insn "addsf3"
16067 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
16068 + (plus:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
16069 + (match_operand:SF 2 "avr32_fp_register_operand" "f")))]
16070 + "TARGET_HARD_FLOAT"
16071 + "fadd.s\t%0, %1, %2"
16072 + [(set_attr "length" "4")
16073 + (set_attr "type" "fmul")])
16074 +
16075 +(define_insn "subsf3"
16076 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
16077 + (minus:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")
16078 + (match_operand:SF 2 "avr32_fp_register_operand" "f")))]
16079 + "TARGET_HARD_FLOAT"
16080 + "fsub.s\t%0, %1, %2"
16081 + [(set_attr "length" "4")
16082 + (set_attr "type" "fmul")])
16083 +
16084 +
16085 +(define_insn "negsf2"
16086 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
16087 + (neg:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")))]
16088 + "TARGET_HARD_FLOAT"
16089 + "fneg.s\t%0, %1"
16090 + [(set_attr "length" "4")
16091 + (set_attr "type" "fmv")])
16092 +
16093 +(define_insn "abssf2"
16094 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
16095 + (abs:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")))]
16096 + "TARGET_HARD_FLOAT"
16097 + "fabs.s\t%0, %1"
16098 + [(set_attr "length" "4")
16099 + (set_attr "type" "fmv")])
16100 +
16101 +(define_insn "truncdfsf2"
16102 + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f")
16103 + (float_truncate:SF
16104 + (match_operand:DF 1 "avr32_fp_register_operand" "f")))]
16105 + "TARGET_HARD_FLOAT"
16106 + "fcastd.s\t%0, %1"
16107 + [(set_attr "length" "4")
16108 + (set_attr "type" "fcast")])
16109 +
16110 +(define_insn "extendsfdf2"
16111 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
16112 + (float_extend:DF
16113 + (match_operand:SF 1 "avr32_fp_register_operand" "f")))]
16114 + "TARGET_HARD_FLOAT"
16115 + "fcasts.d\t%0, %1"
16116 + [(set_attr "length" "4")
16117 + (set_attr "type" "fcast")])
16118 +
16119 +(define_insn "muldf3"
16120 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
16121 + (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
16122 + (match_operand:DF 2 "avr32_fp_register_operand" "f")))]
16123 + "TARGET_HARD_FLOAT"
16124 + "fmul.d\t%0, %1, %2"
16125 + [(set_attr "length" "4")
16126 + (set_attr "type" "fmul")])
16127 +
16128 +(define_insn "nmuldf3"
16129 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
16130 + (neg:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
16131 + (match_operand:DF 2 "avr32_fp_register_operand" "f"))))]
16132 + "TARGET_HARD_FLOAT"
16133 + "fnmul.d\t%0, %1, %2"
16134 + [(set_attr "length" "4")
16135 + (set_attr "type" "fmul")])
16136 +
16137 +(define_peephole2
16138 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "")
16139 + (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "")
16140 + (match_operand:DF 2 "avr32_fp_register_operand" "")))
16141 + (set (match_operand:DF 3 "avr32_fp_register_operand" "")
16142 + (neg:DF (match_dup 0)))]
16143 + "TARGET_HARD_FLOAT &&
16144 + (peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[3]) == REGNO(operands[0])))"
16145 + [(set (match_dup 3)
16146 + (neg:DF (mult:DF (match_dup 1)
16147 + (match_dup 2))))]
16148 +)
16149 +
16150 +(define_insn "macdf3"
16151 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
16152 + (plus:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
16153 + (match_operand:DF 2 "avr32_fp_register_operand" "f"))
16154 + (match_operand:DF 3 "avr32_fp_register_operand" "0")))]
16155 + "TARGET_HARD_FLOAT"
16156 + "fmac.d\t%0, %1, %2"
16157 + [(set_attr "length" "4")
16158 + (set_attr "type" "fmul")])
16159 +
16160 +(define_insn "msubacdf3"
16161 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
16162 + (minus:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
16163 + (match_operand:DF 2 "avr32_fp_register_operand" "f"))
16164 + (match_operand:DF 3 "avr32_fp_register_operand" "0")))]
16165 + "TARGET_HARD_FLOAT"
16166 + "fmsc.d\t%0, %1, %2"
16167 + [(set_attr "length" "4")
16168 + (set_attr "type" "fmul")])
16169 +
16170 +(define_peephole2
16171 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "")
16172 + (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "")
16173 + (match_operand:DF 2 "avr32_fp_register_operand" "")))
16174 + (set (match_operand:DF 3 "avr32_fp_register_operand" "")
16175 + (minus:DF
16176 + (match_dup 0)
16177 + (match_dup 3)))]
16178 + "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])"
16179 + [(set (match_dup 3)
16180 + (minus:DF (mult:DF (match_dup 1)
16181 + (match_dup 2))
16182 + (match_dup 3)))]
16183 + )
16184 +
16185 +(define_insn "nmsubacdf3"
16186 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
16187 + (minus:DF (neg:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
16188 + (match_operand:DF 2 "avr32_fp_register_operand" "f")))
16189 + (match_operand:DF 3 "avr32_fp_register_operand" "0")))]
16190 + "TARGET_HARD_FLOAT"
16191 + "fnmsc.d\t%0, %1, %2"
16192 + [(set_attr "length" "4")
16193 + (set_attr "type" "fmul")])
16194 +
16195 +(define_insn "nmacdf3"
16196 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
16197 + (plus:DF (neg:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
16198 + (match_operand:DF 2 "avr32_fp_register_operand" "f")))
16199 + (match_operand:DF 3 "avr32_fp_register_operand" "0")))]
16200 + "TARGET_HARD_FLOAT"
16201 + "fnmac.d\t%0, %1, %2"
16202 + [(set_attr "length" "4")
16203 + (set_attr "type" "fmul")])
16204 +
16205 +(define_peephole2
16206 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "")
16207 + (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "")
16208 + (match_operand:DF 2 "avr32_fp_register_operand" "")))
16209 + (set (match_operand:DF 3 "avr32_fp_register_operand" "")
16210 + (minus:DF
16211 + (match_dup 3)
16212 + (match_dup 0)))]
16213 + "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])"
16214 + [(set (match_dup 3)
16215 + (plus:DF (neg:DF (mult:DF (match_dup 1)
16216 + (match_dup 2)))
16217 + (match_dup 3)))]
16218 +)
16219 +
16220 +(define_insn "adddf3"
16221 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
16222 + (plus:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
16223 + (match_operand:DF 2 "avr32_fp_register_operand" "f")))]
16224 + "TARGET_HARD_FLOAT"
16225 + "fadd.d\t%0, %1, %2"
16226 + [(set_attr "length" "4")
16227 + (set_attr "type" "fmul")])
16228 +
16229 +(define_insn "subdf3"
16230 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
16231 + (minus:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")
16232 + (match_operand:DF 2 "avr32_fp_register_operand" "f")))]
16233 + "TARGET_HARD_FLOAT"
16234 + "fsub.d\t%0, %1, %2"
16235 + [(set_attr "length" "4")
16236 + (set_attr "type" "fmul")])
16237 +
16238 +(define_insn "negdf2"
16239 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
16240 + (neg:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")))]
16241 + "TARGET_HARD_FLOAT"
16242 + "fneg.d\t%0, %1"
16243 + [(set_attr "length" "4")
16244 + (set_attr "type" "fmv")])
16245 +
16246 +(define_insn "absdf2"
16247 + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f")
16248 + (abs:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")))]
16249 + "TARGET_HARD_FLOAT"
16250 + "fabs.d\t%0, %1"
16251 + [(set_attr "length" "4")
16252 + (set_attr "type" "fmv")])
16253 +
16254 +
16255 +(define_expand "cmpdf"
16256 + [(set (cc0)
16257 + (compare:DF
16258 + (match_operand:DF 0 "general_operand" "")
16259 + (match_operand:DF 1 "general_operand" "")))]
16260 + "TARGET_HARD_FLOAT"
16261 + "{
16262 + rtx tmpreg;
16263 + if ( !REG_P(operands[0]) )
16264 + operands[0] = force_reg(DFmode, operands[0]);
16265 +
16266 + if ( !REG_P(operands[1]) )
16267 + operands[1] = force_reg(DFmode, operands[1]);
16268 +
16269 + avr32_compare_op0 = operands[0];
16270 + avr32_compare_op1 = operands[1];
16271 +
16272 + emit_insn(gen_cmpdf_internal(operands[0], operands[1]));
16273 +
16274 + tmpreg = gen_reg_rtx(SImode);
16275 + emit_insn(gen_fpcc_to_reg(tmpreg));
16276 + emit_insn(gen_reg_to_cc(tmpreg));
16277 +
16278 + DONE;
16279 + }"
16280 +)
16281 +
16282 +(define_insn "cmpdf_internal"
16283 + [(set (reg:CC FPCC_REGNUM)
16284 + (compare:CC
16285 + (match_operand:DF 0 "avr32_fp_register_operand" "f")
16286 + (match_operand:DF 1 "avr32_fp_register_operand" "f")))]
16287 + "TARGET_HARD_FLOAT"
16288 + {
16289 + if (!rtx_equal_p(cc_prev_status.mdep.fpvalue, SET_SRC(PATTERN (insn))) )
16290 + return "fcmp.d\t%0, %1";
16291 + return "";
16292 + }
16293 + [(set_attr "length" "4")
16294 + (set_attr "type" "fcmpd")
16295 + (set_attr "cc" "fpcompare")])
16296 +
16297 +(define_expand "cmpsf"
16298 + [(set (cc0)
16299 + (compare:SF
16300 + (match_operand:SF 0 "general_operand" "")
16301 + (match_operand:SF 1 "general_operand" "")))]
16302 + "TARGET_HARD_FLOAT"
16303 + "{
16304 + rtx tmpreg;
16305 + if ( !REG_P(operands[0]) )
16306 + operands[0] = force_reg(SFmode, operands[0]);
16307 +
16308 + if ( !REG_P(operands[1]) )
16309 + operands[1] = force_reg(SFmode, operands[1]);
16310 +
16311 + avr32_compare_op0 = operands[0];
16312 + avr32_compare_op1 = operands[1];
16313 +
16314 + emit_insn(gen_cmpsf_internal(operands[0], operands[1]));
16315 +
16316 + tmpreg = gen_reg_rtx(SImode);
16317 + emit_insn(gen_fpcc_to_reg(tmpreg));
16318 + emit_insn(gen_reg_to_cc(tmpreg));
16319 +
16320 + DONE;
16321 + }"
16322 +)
16323 +
16324 +(define_insn "cmpsf_internal"
16325 + [(set (reg:CC FPCC_REGNUM)
16326 + (compare:CC
16327 + (match_operand:SF 0 "avr32_fp_register_operand" "f")
16328 + (match_operand:SF 1 "avr32_fp_register_operand" "f")))]
16329 + "TARGET_HARD_FLOAT"
16330 + {
16331 + if (!rtx_equal_p(cc_prev_status.mdep.fpvalue, SET_SRC(PATTERN (insn))) )
16332 + return "fcmp.s\t%0, %1";
16333 + return "";
16334 + }
16335 + [(set_attr "length" "4")
16336 + (set_attr "type" "fcmps")
16337 + (set_attr "cc" "fpcompare")])
16338 +
16339 +(define_insn "fpcc_to_reg"
16340 + [(set (match_operand:SI 0 "register_operand" "=r")
16341 + (unspec:SI [(reg:CC FPCC_REGNUM)]
16342 + UNSPEC_FPCC_TO_REG))]
16343 + "TARGET_HARD_FLOAT"
16344 + "fmov.s\t%0, fsr"
16345 + [(set_attr "length" "4")
16346 + (set_attr "type" "fmvcpu")])
16347 +
16348 +(define_insn "reg_to_cc"
16349 + [(set (cc0)
16350 + (unspec:SI [(match_operand:SI 0 "register_operand" "r")]
16351 + UNSPEC_REG_TO_CC))]
16352 + "TARGET_HARD_FLOAT"
16353 + "musfr\t%0"
16354 + [(set_attr "length" "2")
16355 + (set_attr "type" "alu")
16356 + (set_attr "cc" "from_fpcc")])
16357 +
16358 +(define_insn "stm_fp"
16359 + [(unspec [(match_operand 0 "register_operand" "r")
16360 + (match_operand 1 "const_int_operand" "")
16361 + (match_operand 2 "const_int_operand" "")]
16362 + UNSPEC_STMFP)]
16363 + "TARGET_HARD_FLOAT"
16364 + {
16365 + int cop_reglist = INTVAL(operands[1]);
16366 +
16367 + if (INTVAL(operands[2]) != 0)
16368 + return "stcm.w\tcp0, --%0, %C1";
16369 + else
16370 + return "stcm.w\tcp0, %0, %C1";
16371 +
16372 + if ( cop_reglist & ~0xff ){
16373 + operands[1] = GEN_INT(cop_reglist & ~0xff);
16374 + if (INTVAL(operands[2]) != 0)
16375 + return "stcm.d\tcp0, --%0, %D1";
16376 + else
16377 + return "stcm.d\tcp0, %0, %D1";
16378 + }
16379 + }
16380 + [(set_attr "type" "fstm")
16381 + (set_attr "length" "4")
16382 + (set_attr "cc" "none")])
16383 diff -Nur gcc-4.1.2/gcc/config/avr32/lib1funcs.S gcc-4.1.2-owrt/gcc/config/avr32/lib1funcs.S
16384 --- gcc-4.1.2/gcc/config/avr32/lib1funcs.S 1970-01-01 01:00:00.000000000 +0100
16385 +++ gcc-4.1.2-owrt/gcc/config/avr32/lib1funcs.S 2007-05-24 12:03:28.000000000 +0200
16386 @@ -0,0 +1,1678 @@
16387 +/*#define __IEEE_LARGE_FLOATS__*/
16388 +
16389 +/* Adjust the unpacked double number if it is a subnormal number.
16390 + The exponent and mantissa pair are stored
16391 + in [mant_hi,mant_lo] and [exp]. A register with the correct sign bit in
16392 + the MSB is passed in [sign]. Needs two scratch
16393 + registers [scratch1] and [scratch2]. An adjusted and packed double float
16394 + is present in [mant_hi,mant_lo] after macro has executed */
16395 +.macro adjust_subnormal_df exp, mant_lo, mant_hi, sign, scratch1, scratch2
16396 + /* We have an exponent which is <=0 indicating a subnormal number
16397 + As it should be stored as if the exponent was 1 (although the
16398 + exponent field is all zeros to indicate a subnormal number)
16399 + we have to shift down the mantissa to its correct position. */
16400 + neg \exp
16401 + sub \exp,-1 /* amount to shift down */
16402 + cp.w \exp,54
16403 + brlo 50f /* if more than 53 shift steps, the
16404 + entire mantissa will disappear
16405 +					   without any rounding occurring */
16406 + mov \mant_hi, 0
16407 + mov \mant_lo, 0
16408 + rjmp 52f
16409 +50:
16410 + sub \exp,-10 /* do the shift to position the
16411 + mantissa at the same time
16412 + note! this does not include the
16413 + final 1 step shift to add the sign */
16414 +
16415 + /* when shifting, save all shifted out bits in [scratch2]. we may need to
16416 + look at them to make correct rounding. */
16417 +
16418 + rsub \scratch1,\exp,32 /* get inverted shift count */
16419 + cp.w \exp,32 /* handle shifts >= 32 separately */
16420 + brhs 51f
16421 +
16422 + /* small (<32) shift amount, both words are part of the shift */
16423 + lsl \scratch2,\mant_lo,\scratch1 /* save bits to shift out from lsw*/
16424 + lsl \scratch1,\mant_hi,\scratch1 /* get bits from msw destined for lsw*/
16425 + lsr \mant_lo,\mant_lo,\exp /* shift down lsw */
16426 + lsr \mant_hi,\mant_hi,\exp /* shift down msw */
16427 + or \mant_hi,\scratch1 /* add bits from msw with prepared lsw */
16428 + rjmp 50f
16429 +
16430 + /* large (>=32) shift amount, only lsw will have bits left after shift.
16431 + note that shift operations will use ((shift count) mod 32) so
16432 + we do not need to subtract 32 from shift count. */
16433 +51:
16434 + lsl \scratch2,\mant_hi,\scratch1 /* save bits to shift out from msw */
16435 + or \scratch2,\mant_lo /* also save all bits from lsw */
16436 + mov \mant_lo,\mant_hi /* msw -> lsw (i.e. "shift 32 first") */
16437 + mov \mant_hi,0 /* clear msw */
16438 + lsr \mant_lo,\mant_lo,\exp /* make rest of shift inside lsw */
16439 +
16440 +50:
16441 + /* result is almost ready to return, except that least significant bit
16442 + and the part we already shifted out may cause the result to be
16443 + rounded */
16444 + bld \mant_lo,0 /* get bit to be shifted out */
16445 + brcc 51f /* if bit was 0, no rounding */
16446 +
16447 + /* msb of part to remove is 1, so rounding depends on rest of bits */
16448 + tst \scratch2,\scratch2 /* get shifted out tail */
16449 + brne 50f /* if rest > 0, do round */
16450 + bld \mant_lo,1 /* we have to look at lsb in result */
16451 + brcc 51f /* if lsb is 0, don't round */
16452 +
16453 +50:
16454 + /* subnormal result requires rounding
16455 + rounding may cause subnormal to become smallest normal number
16456 + luckily, smallest normal number has exactly the representation
16457 + we got by rippling a one bit up from mantissa into exponent field. */
16458 + sub \mant_lo,-1
16459 + subcc \mant_hi,-1
16460 +
16461 +51:
16462 + /* shift and return packed double with correct sign */
16463 + rol \sign
16464 + ror \mant_hi
16465 + ror \mant_lo
16466 +52:
16467 +.endm
16468 +
16469 +
16470 +/* Adjust subnormal single float number with exponent [exp]
16471 + and mantissa [mant] and round. */
16472 +.macro adjust_subnormal_sf sf, exp, mant, sign, scratch
16473 + /* subnormal number */
16474 + rsub \exp,\exp, 1 /* shift amount */
16475 + cp.w \exp, 25
16476 + movhs \mant, 0
16477 + brhs 90f /* Return zero */
16478 + rsub \scratch, \exp, 32
16479 + lsl \scratch, \mant,\scratch/* Check if there are any bits set
16480 + in the bits discarded in the mantissa */
16481 + srne \scratch /* If so set the lsb of the shifted mantissa */
16482 + lsr \mant,\mant,\exp /* Shift the mantissa */
16483 + or \mant, \scratch /* Round lsb if any bits were shifted out */
16484 +	/* Rounding : For explanation, see round_sf. */
16485 + mov \scratch, 0x7f /* Set rounding constant */
16486 + bld \mant, 8
16487 + subeq \scratch, -1 /* For odd numbers use rounding constant 0x80 */
16488 + add \mant, \scratch /* Add rounding constant to mantissa */
16489 + /* We can't overflow because mantissa is at least shifted one position
16490 + to the right so the implicit bit is zero. We can however get the implicit
16491 + bit set after rounding which means that we have the lowest normal number
16492 + but this is ok since this bit has the same position as the LSB of the
16493 + exponent */
16494 + lsr \sf, \mant, 7
16495 + /* Rotate in sign */
16496 + lsl \sign, 1
16497 + ror \sf
16498 +90:
16499 +.endm
16500 +
16501 +
16502 +/* Round the unpacked df number with exponent [exp] and
16503 + mantissa [mant_hi, mant_lo]. Uses scratch register
16504 + [scratch] */
16505 +.macro round_df exp, mant_lo, mant_hi, scratch
16506 + mov \scratch, 0x3ff /* Rounding constant */
16507 + bld \mant_lo,11 /* Check if lsb in the final result is
16508 + set */
16509 + subeq \scratch, -1 /* Adjust rounding constant to 0x400
16510 + if rounding 0.5 upwards */
16511 + add \mant_lo, \scratch /* Round */
16512 + acr \mant_hi /* If overflowing we know that
16513 + we have all zeros in the bits not
16514 + scaled out so we can leave them
16515 +				    but we must increase the exponent by
16516 + two since we had an implicit bit
16517 + which is lost + the extra overflow bit */
16518 + subcs \exp, -2 /* Update exponent */
16519 +.endm
16520 +
16521 +/* Round single float number stored in [mant] and [exp] */
16522 +.macro round_sf exp, mant, scratch
16523 + /* Round:
16524 + For 0.5 we round to nearest even integer
16525 + for all other cases we round to nearest integer.
16526 + This means that if the digit left of the "point" (.)
16527 + is 1 we can add 0x80 to the mantissa since the
16528 + corner case 0x180 will round up to 0x200. If the
16529 + digit left of the "point" is 0 we will have to
16530 + add 0x7f since this will give 0xff and hence a
16531 + truncation/rounding downwards for the corner
16532 + case when the 9 lowest bits are 0x080 */
16533 + mov \scratch, 0x7f /* Set rounding constant */
16534 + /* Check if the mantissa is even or odd */
16535 + bld \mant, 8
16536 + subeq \scratch, -1 /* Rounding constant should be 0x80 */
16537 + add \mant, \scratch
16538 + subcs \exp, -2 /* Adjust exponent if we overflowed */
16539 +.endm
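
For reference, the rounding rule the round_sf comment describes (add 0x7f, or 0x80 when the bit that will become the LSB is 1, so that exact halves round towards even) can be sketched in C roughly as follows; the helper name and the fixed 8-bit fraction width are illustrative only and not part of the patch:

#include <stdint.h>

/* Round a value whose low 8 bits will be discarded, to nearest with
   ties to even, using the same constant trick as round_sf. */
static uint32_t round_nearest_even_8(uint32_t mant)
{
    uint32_t rc = (mant & 0x100) ? 0x80 : 0x7f;  /* 0x80 only when the kept LSB is 1 */
    /* A carry out of bit 31 (handled via the exponent in the asm) is ignored here. */
    return (mant + rc) >> 8;
}
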
16540 +
16541 +/* Scale mantissa [mant_hi, mant_lo] with amount [shift_count].
16542 + Uses scratch registers [scratch1] and [scratch2] */
16543 +.macro scale_df shift_count, mant_lo, mant_hi, scratch1, scratch2
16544 + /* Scale [mant_hi, mant_lo] with shift_amount.
16545 + Must not forget the sticky bits we intend to shift out. */
16546 +
16547 + rsub \scratch1,\shift_count,32/* get (32 - shift count)
16548 + (if shift count > 32 we get a
16549 + negative value, but that will
16550 + work as well in the code below.) */
16551 +
16552 + cp.w \shift_count,32 /* handle shifts >= 32 separately */
16553 + brhs 70f
16554 +
16555 + /* small (<32) shift amount, both words are part of the shift
16556 + first remember whether part that is lost contains any 1 bits ... */
16557 + lsl \scratch2,\mant_lo,\scratch1 /*shift away bits that are part of
16558 + final mantissa. only part that goes
16559 + to scratch2 are bits that will be lost */
16560 +
16561 + /* ... and now to the actual shift */
16562 + lsl \scratch1,\mant_hi,\scratch1 /* get bits from msw destined for lsw*/
16563 + lsr \mant_lo,\mant_lo,\shift_count /* shift down lsw of mantissa */
16564 + lsr \mant_hi,\mant_hi,\shift_count /* shift down msw of mantissa */
16565 + or \mant_lo,\scratch1 /* combine these bits with prepared lsw*/
16566 + rjmp 71f
16567 +
16568 + /* large (>=32) shift amount, only lsw will have bits left after shift.
16569 + note that shift operations will use ((shift count) mod 32) so
16570 + we do not need to subtract 32 from shift count. */
16571 +70:
16572 + /* first remember whether part that is lost contains any 1 bits ... */
16573 + lsl \scratch2,\mant_hi,\scratch1 /* save all lost bits from msw */
16574 + or \scratch2,\mant_lo /* also save lost bits (all) from lsw
16575 + now scratch2<>0 if we lose any bits */
16576 +
16577 + /* ... and now to the actual shift */
16578 + mov \mant_lo,\mant_hi /* msw -> lsw (i.e. "shift 32 first")*/
16579 + mov \mant_hi,0 /* clear msw */
16580 + lsr \mant_lo,\mant_lo,\shift_count /* make rest of shift inside lsw*/
16581 +
16582 +71:
16583 + cp.w \scratch2,0 /* if any '1' bit in part we lost ...*/
16584 + breq 70f
16585 +
16586 + sbr \mant_lo,0 /* ... we need to set sticky bit*/
16587 +70:
16588 +.endm
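
The sticky-bit handling in scale_df (remember whether any shifted-out bit was 1 and fold it into the LSB) corresponds to the C sketch below; a single 64-bit integer stands in for the [mant_hi, mant_lo] register pair, and the function name is illustrative only:

#include <stdint.h>

/* Shift a 64-bit mantissa right by 'count' (0 < count < 64 assumed) and
   set the LSB if any of the discarded bits were non-zero, as scale_df does. */
static uint64_t shift_right_sticky(uint64_t mant, unsigned count)
{
    uint64_t lost = mant << (64 - count);  /* the bits that fall off the end */
    mant >>= count;
    if (lost != 0)
        mant |= 1;                         /* sticky bit, needed for correct rounding */
    return mant;
}
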
16589 +
16590 +/* Unpack exponent and mantissa from the double number
16591 + stored in [df_hi,df_lo]. The exponent is stored in [exp]
16592 + while the mantissa is stored in [df_hi,df_lo]. */
16593 +
16594 +.macro unpack_df exp, df_lo, df_hi
16595 + lsr \exp, \df_hi,21 /* Extract exponent */
16596 + lsl \df_hi,10 /* Get mantissa */
16597 + or \df_hi,\df_hi,\df_lo>>21
16598 + lsl \df_lo,11
16599 +
16600 + neg \exp /* Fix implicit bit */
16601 + bst \df_hi,31
16602 + subeq \exp,1
16603 + neg \exp /* negate back exponent */
16604 + .endm
16605 +
16606 +/* Unpack exponent and mantissa from the single float number
16607 + stored in [sf]. The exponent is stored in [exp]
16608 + while the mantissa is stored in [sf]. */
16609 +.macro unpack_sf exp, sf
16610 + lsr \exp, \sf, 24
16611 + brne 80f
16612 + /* Fix subnormal number */
16613 + lsl \sf,7
16614 + clz \exp,\sf
16615 + lsl \sf,\sf,\exp
16616 + rsub \exp,\exp,1
16617 + rjmp 81f
16618 +80:
16619 + lsl \sf,7
16620 + sbr \sf, 31 /*Implicit bit*/
16621 +81:
16622 +.endm
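
What unpack_sf produces can be described in C roughly as below, assuming (as the callers here do) that the input has already been shifted left one bit to drop the sign, so the exponent sits in bits 31..24; the helper name and out-parameters are illustrative, and a zero input is not handled:

#include <stdint.h>

/* Mirror of unpack_sf: biased exponent out, mantissa left-aligned at
   bit 31 with the implicit bit made explicit; subnormal inputs are
   normalized and get a correspondingly adjusted exponent. */
static void unpack_sf_c(uint32_t sf, int32_t *exp, uint32_t *mant)
{
    *exp = sf >> 24;
    if (*exp != 0) {
        *mant = (sf << 7) | 0x80000000u;   /* normal number: set the implicit bit */
    } else {
        uint32_t m = sf << 7;              /* subnormal, m != 0 assumed */
        int lz = __builtin_clz(m);
        *mant = m << lz;
        *exp = 1 - lz;
    }
}
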
16623 +
16624 +
16625 +
16626 +/* Pack a single float number stored in [mant] and [exp]
16627 + into a single float number in [sf] */
16628 +.macro pack_sf sf, exp, mant
16629 + bld \mant,31 /* implicit bit to z */
16630 + subne \exp,1 /* if subnormal (implicit bit 0)
16631 + adjust exponent to storage format */
16632 +
16633 + lsr \sf, \mant, 7
16634 + bfins \sf, \exp, 24, 8
16635 +.endm
16636 +
16637 +/* Pack exponent [exp] and mantissa [mant_hi, mant_lo]
16638 + into [df_hi, df_lo]. [df_hi] is shifted
16639 + one bit up so the sign bit can be shifted into it */
16640 +
16641 +.macro pack_df exp, mant_lo, mant_hi, df_lo, df_hi
16642 + bld \mant_hi,31 /* implicit bit to z */
16643 + subne \exp,1 /* if subnormal (implicit bit 0)
16644 + adjust exponent to storage format */
16645 +
16646 + lsr \mant_lo,11 /* shift back lsw */
16647 + or \df_lo,\mant_lo,\mant_hi<<21 /* combine with low bits from msw */
16648 + lsl \mant_hi,1 /* get rid of implicit bit */
16649 + lsr \mant_hi,11 /* shift back msw except for one step*/
16650 + or \df_hi,\mant_hi,\exp<<21 /* combine msw with exponent */
16651 +.endm
16652 +
16653 +/* Normalize single float number stored in [mant] and [exp]
16654 + using scratch register [scratch] */
16655 +.macro normalize_sf exp, mant, scratch
16656 + /* Adjust exponent and mantissa */
16657 + clz \scratch, \mant
16658 + sub \exp, \scratch
16659 + lsl \mant, \mant, \scratch
16660 +.endm
16661 +
16662 +/* Normalize the exponent and mantissa pair stored
16663 + in [mant_hi,mant_lo] and [exp]. Needs two scratch
16664 + registers [scratch1] and [scratch2]. */
16665 +.macro normalize_df exp, mant_lo, mant_hi, scratch1, scratch2
16666 + clz \scratch1,\mant_hi /* Check if we have zeros in high bits */
16667 + breq 80f /* No need for scaling if no zeros in high bits */
16668 + cp.w \scratch1,32 /* Check for all zeros */
16669 + breq 81f
16670 +
16671 + /* shift amount is smaller than 32, and involves both msw and lsw*/
16672 + rsub \scratch2,\scratch1,32 /* shift mantissa */
16673 + lsl \mant_hi,\mant_hi,\scratch1
16674 + lsr \scratch2,\mant_lo,\scratch2
16675 + or \mant_hi,\scratch2
16676 + lsl \mant_lo,\mant_lo,\scratch1
16677 + sub \exp,\scratch1 /* adjust exponent */
16678 + rjmp 80f /* Finished */
16679 +81:
16680 + /* shift amount is greater than 32 */
16681 + clz \scratch1,\mant_lo /* shift mantissa */
16682 + sub \scratch1,-32
16683 + mov \mant_hi,\mant_lo
16684 + lsl \mant_hi,\mant_hi,\scratch1
16685 + mov \mant_lo,0
16686 + sub \exp,\scratch1 /* adjust exponent */
16687 +80:
16688 +.endm
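
normalize_sf and normalize_df simply count leading zeros, shift the mantissa up, and lower the exponent by the same amount; a C equivalent of the double-precision case, with one 64-bit integer standing in for the register pair (name illustrative, mant != 0 assumed), is:

#include <stdint.h>

/* Bring the most significant set bit of the mantissa to bit 63 and
   compensate in the exponent, as normalize_df does. */
static void normalize_c(int32_t *exp, uint64_t *mant)
{
    int lz = __builtin_clzll(*mant);
    *mant <<= lz;
    *exp -= lz;
}
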
16689 +
16690 +
16691 +#ifdef L_avr32_f64_mul
16692 + .align 2
16693 + .global __avr32_f64_mul
16694 + .type __avr32_f64_mul,@function
16695 +
16696 +__avr32_f64_mul:
16697 + pushm r0-r3,r4-r7,lr
16698 +
16699 + /* Unpack */
16700 + eor r12, r11, r9 /* Sign op1 ^ Sign op2 is MSB of r12*/
16701 + lsl r11,1 /* Unpack op1 */
16702 + lsl r9,1 /* Unpack op2 */
16703 +
16704 + /* Sort operands op1 >= op2 */
16705 + lddpc r5, .Linf
16706 + cp.w r10,r8
16707 + cpc r11,r9
16708 + brhs 0f
16709 +
16710 + mov r7,r11 /* swap operands if op2 was larger */
16711 + mov r6,r10
16712 + mov r11,r9
16713 + mov r10,r8
16714 + mov r9,r7
16715 + mov r8,r6
16716 +
16717 +0:
16718 + /* Check against infinity */
16719 + cp.w r11,r5
16720 + brlo 1f
16721 + /* infinity or nan */
16722 +	/* we have to check the low word as well, since the nan mantissa may be 0 in the msw */
16723 + cpc r10
16724 + /* we know that op1 is inf or nan. if z != 1 then we have nan.
16725 + in this case, also return nan. */
16726 + breq 0f
16727 + /* Return NaN */
16728 + mov r11, -1
16729 + rjmp __dfmul_return_op1
16730 +0:
16731 +
16732 + /* op1 is infinity. op2 is smaller or same so it cannot be nan.
16733 + it can be infinity or a (sub-)normal number.
16734 +	   we should return op1 (infinity) except when op2 is zero, in
16735 +	   which case the result should be nan. */
16736 + or r5,r9,r8
16737 + brne __dfmul_return_op1 /* op2 is not zero. return op1.*/
16738 + /* Return NaN */
16739 + mov r11, -1
16740 + rjmp __dfmul_return_op1
16741 +
16742 +1:
16743 + /* no operand is inf/nan, and operands have been arranged in order
16744 + with op1 >= op2, implying that if we have a zero, it is found in
16745 + op2. in this case, result should be zero (with sign from both ops). */
16746 +
16747 + or r5,r9,r8 /* check the smaller value for zero */
16748 + brne 0f
16749 + mov r10, 0
16750 + mov r11, 0
16751 + rjmp __dfmul_return_op1 /* Early exit */
16752 +0:
16753 +
16754 + /* we have two "normal" (can be subnormal) nonzero numbers in r11:r10
16755 + and r9:r8. sign of result is already calculated in r12.
16756 + perform a normal multiplication. */
16757 +
16758 + /* Unpack and normalize*/
16759 + unpack_df r7 /*exp*/, r10, r11 /* mantissa */
16760 + normalize_df r7 /*exp*/, r10, r11 /* mantissa */, r4, r5 /* scratch */
16761 +
16762 +
16763 + /* Unpack and normalize*/
16764 + unpack_df r6 /*exp*/, r8, r9 /* mantissa */
16765 + normalize_df r6 /*exp*/, r8, r9 /* mantissa */, r4, r5 /* scratch */
16766 +
16767 + /* Multiply */
16768 +
16769 + mulu.d r0,r10,r8
16770 + add lr,r7,r6 /* calculate new exponent after mul */
16771 + mulu.d r2,r11,r8
16772 + sub lr,(1023-1) /* remove exponent bias as we have
16773 + included bias from both op1 and op2
16774 + sub one less, or in other words
16775 + add one to exponent. see below why. */
16776 + mulu.d r6,r11,r9
16777 + add r2,r1
16778 + mulu.d r4,r10,r9
16779 +
16780 +
16781 + adc r6,r6,r3
16782 + acr r7
16783 +
16784 + add r4,r2
16785 + adc r6,r6,r5
16786 + acr r7
16787 +
16788 + // r7:r6 is now in range [0x4000...0000 - 0xffff...fffe]
16789 + // remaining bits in r0 and r4 are of no interest, except that we have
16790 + // to add a sticky bit to r10 in case we had a 1 bit in r4 or r0.
16791 +
16792 + or r4,r0
16793 + movne r0, 1 /* If we have bits in r4 or r0 */
16794 + or r6,r0 /* set lsb of result to 1 */
16795 +
16796 +
16797 + // if msb is set, it was because multiplication gave an "overflow"
16798 + // of one bit so exponent should be incremented.
16799 + // we already did that above so we are done.
16800 + // if msb is *not* set it will be normalized and exponent will be
16801 + // decremented (which will compensate the one we added above).
16802 +
16803 + normalize_df lr /*exp*/, r6, r7 /* mantissa */, r8, r9 /* scratch */
16804 +
16805 + /* Check if a subnormal result was created */
16806 + cp.w lr, 0
16807 + brgt 0f
16808 +
16809 + adjust_subnormal_df lr, r6, r7, r12, r8, r9
16810 + mov r10, r6
16811 + mov r11, r7
16812 + popm r0-r3,r4-r7, pc
16813 +0:
16814 +
16815 + /* Round result */
16816 + round_df lr /*exp*/, r6, r7 /* Mantissa */, r4 /*scratch*/
16817 + cp.w lr,0x7ff
16818 + brlt 0f
16819 + /*Return infinity */
16820 + lddpc r11, .Linf
16821 + mov r10, 0
16822 + rjmp __dfmul_return_op1
16823 +
16824 +0:
16825 +
16826 + /* Pack */
16827 + pack_df lr /*exp*/, r6, r7 /* mantissa */, r10, r11 /* Output df number*/
16828 +__dfmul_return_op1:
16829 + lsl r12,1 /* shift in sign bit */
16830 + ror r11
16831 +
16832 + popm r0-r3,r4-r7, pc
16833 +
16834 +#endif
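
The core of __avr32_f64_mul is the four-partial-product multiplication of the two 64-bit mantissas (the mulu.d sequence) plus a sticky bit taken from the discarded low half. A C sketch of just that step, with illustrative names and without the exponent and rounding handling that follows in the asm, might look like:

#include <stdint.h>

/* High 64 bits of a*b, built from 32x32->64 partial products, with the
   LSB used as a sticky bit whenever any of the discarded low 64 bits
   of the full product are non-zero. */
static uint64_t mul_high_sticky(uint64_t a, uint64_t b)
{
    uint32_t al = (uint32_t)a, ah = (uint32_t)(a >> 32);
    uint32_t bl = (uint32_t)b, bh = (uint32_t)(b >> 32);

    uint64_t ll = (uint64_t)al * bl;           /* bits  0..63  */
    uint64_t lh = (uint64_t)al * bh;           /* bits 32..95  */
    uint64_t hl = (uint64_t)ah * bl;           /* bits 32..95  */
    uint64_t hh = (uint64_t)ah * bh;           /* bits 64..127 */

    /* accumulate the middle terms, tracking the carry into the high part */
    uint64_t mid  = (ll >> 32) + (uint32_t)lh + (uint32_t)hl;
    uint64_t high = hh + (lh >> 32) + (hl >> 32) + (mid >> 32);

    uint64_t low = (mid << 32) | (uint32_t)ll; /* the 64 bits that are dropped */
    if (low != 0)
        high |= 1;                             /* sticky bit, as in the asm */
    return high;
}
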
16835 +
16836 +
16837 +#ifdef L_avr32_f64_addsub
16838 + .align 2
16839 + .global __avr32_f64_sub
16840 + .type __avr32_f64_sub,@function
16841 +
16842 +__avr32_f64_sub:
16843 + pushm r4-r7,lr
16844 +
16845 + eor r12,r11,r9 // compare signs of operands
16846 + bld r12,31
16847 + brcc __dfsub // same sign => subtract
16848 +
16849 + eorh r9,0x8000
16850 + rjmp __dfadd // different signs => op1 + (-op2)
16851 +__dfsub:
16852 +
16853 + lsl r11,1 // unpack op1 msw and get sign in c
16854 + or r4,r11,r10 // check if all bits zero
16855 + brne 1f
16856 +
16857 + // op1 is zero, negate op2 and handle as add
16858 + eorh r9,0x8000
16859 + // op1 is +/-0, and is unpacked with sign in c. add to op2.
16860 + // also used by sub, but op2 has been negated in this case
16861 + ror r12 // save sign of op1 in msb of r12
16862 + lsl r9,1 // unpack msw and get sign of op2
16863 + or r4,r9,r8 // check all bits in op2
16864 + breq 0f
16865 +
16866 + // if op2 != 0, then return op2 unchanged.
16867 + ror r9 // pack op2 msw again with sign from c
16868 + mov r11,r9
16869 + mov r10,r8
16870 + popm r4-r7,pc
16871 +
16872 +0:
16873 +	// both op1 and op2 zero, but sign unknown. the result sign is the AND of the signs.
16874 + ror r9 // pack op2 msw again with sign from c
16875 + lsl r12,1 // get back sign of op1 into c ...
16876 + ror r11 // and back in original op1
16877 + and r11,r9 // and sign bits. as op1 is zero, the
16878 + // only bit which can be 1 is sign bit
16879 + popm r4-r7,pc
16880 +
16881 +1:
16882 + ror r12 // save op1 sign in msb of r12
16883 +
16884 + lsl r9,1 // unpack op2 msw
16885 + or r4,r8,r9
16886 + brne 0f
16887 + // op2 is zero, return op1
16888 + // whatever it is. the only case
16889 + // requiring special handling is if
16890 + // op1 is zero, but that was handled
16891 + // above.
16892 + lsl r12, 1
16893 + ror r11
16894 + popm r4-r7,pc
16895 +
16896 +0:
16897 + // make sure that op1 >= op2, flip sign if we swap ops
16898 + cp.w r10,r8
16899 + cpc r11,r9
16900 + brhs 0f
16901 +
16902 + com r12 // sign of op1 and result in lsb(r12)
16903 + mov r7,r11 // swap operands if op2 was larger
16904 + mov r6,r10
16905 + mov r11,r9
16906 + mov r10,r8
16907 + mov r9,r7
16908 + mov r8,r6
16909 +
16910 +0:
16911 + // check if op1 is nan or inf.
16912 + lddpc r5,.Linf
16913 + cp.w r11,r5
16914 + brlo 1f
16915 + /* Op 1 is nan or inf */
16916 +	// we have to check the low word as well, since the nan mantissa may be 0 in the msw
16917 + cpc r10
16918 + // we know that op1 is inf or nan. if z != 1 then we have nan.
16919 + // if we have nan, return nan.
16920 + breq 0f
16921 + mov r11, -1
16922 + rjmp __dfsub_return_op1
16923 +0:
16924 +
16925 +	// op1 is infinity. check if op2 is nan, infinity or a normal number.
16926 + cp.w r9,r5
16927 + movhs r11, -1 // op2 is a normal number. return op1.
16928 +
16929 + // op2 can be infinity (of the same sign as op1) or nan.
16930 + // in both cases we should return nan.
16931 + rjmp __dfsub_return_op1
16932 +1:
16933 + // if op1 is not inf or nan, then op2 cannot be since op1 >= op2
16934 +
16935 + // now prepare the operands by expanding them and shifting op2
16936 + // to the correct position for the subtract. note! if op2 is
16937 + // insignificant compared to op1, the function will take care of
16938 + // this and return op1 directly to the application.
16939 +
16940 + /* Unpack operands */
16941 + unpack_df r7 /* exp op1*/, r10, r11 /* Mantissa op1 */
16942 + unpack_df r6 /* exp op2*/, r8, r9 /* Mantissa op2 */
16943 +
16944 + /* Get shift amount required for aligning op1 and op2 */
16945 + rsub r6, r7
16946 + breq __perform_dfsub /* No shift needed */
16947 +
16948 + cp.w r6, 63
16949 + brhs __dfsub_pack_result /* Op 2 insignificant compared to op1 */
16950 +
16951 + /* Shift mantissa of op2 so that op1 and op2 are aligned */
16952 + scale_df r6 /* shift_count*/, r8, r9 /* Mantissa */, r4, r5 /*Scratch*/
16953 +
16954 +__perform_dfsub:
16955 + sub r10,r8 /* subtract mantissa of op2 from op1 */
16956 + sbc r11,r11,r9
16957 + or r4,r11,r10 /* check if result is all zeroes */
16958 + brne 0f
16959 + popm r4-r7,pc /* Early return */
16960 +0:
16961 +
16962 + normalize_df r7 /*exp*/, r10, r11 /* mantissa */, r8, r9 /* scratch */
16963 +
16964 + /* Check if a subnormal result was created */
16965 + cp.w r7, 0
16966 + brgt 0f
16967 +
16968 + adjust_subnormal_df r7 /*exp*/, r10, r11 /* Mantissa */, r12 /*sign*/, r8, r9 /*scratch*/
16969 + popm r4-r7,pc
16970 +0:
16971 +
16972 + /* Round result */
16973 + round_df r7 /*exp*/, r10, r11 /* Mantissa */, r9 /*scratch*/
16974 + cp.w r7,0x7ff
16975 + brlt __dfsub_pack_result
16976 + /*Return infinity */
16977 + lddpc r11, .Linf
16978 + mov r10, 0
16979 + rjmp __dfsub_return_op1
16980 +
16981 +__dfsub_pack_result:
16982 + /* Pack */
16983 + pack_df r7 /*exp*/, r10, r11 /* mantissa */, r10, r11 /* Output df number*/
16984 +
16985 +__dfsub_return_op1:
16986 + lsl r12,1
16987 + ror r11
16988 + popm r4-r7,pc
16989 +
16990 + .align 2
16991 + .global __avr32_f64_add
16992 + .type __avr32_f64_add,@function
16993 +__avr32_f64_add:
16994 + pushm r4-r7,lr
16995 + eor r12,r11,r9 // compare signs of operands
16996 + lsl r12,1
16997 + brcc __dfadd // same sign => add
16998 +
16999 + eorh r9,0x8000
17000 + rjmp __dfsub // different signs => op1 - (-op2)
17001 +__dfadd:
17002 +
17003 + lsl r11,1 // unpack op1 msw and get sign in c
17004 + or r4,r11,r10 // check if all bits zero
17005 + brne 1f
17006 +
17007 + // op1 is +/-0, and is unpacked with sign in c. add to op2.
17008 + // also used by sub, but op2 has been negated in this case
17009 + ror r12 // save sign of op1 in msb of r12
17010 + lsl r9,1 // unpack msw and get sign of op2
17011 + or r4,r9,r8 // check all bits in op2
17012 + breq 0f
17013 +
17014 + // if op2 != 0, then return op2 unchanged.
17015 + ror r9 // pack op2 msw again with sign from c
17016 + mov r11,r9
17017 + mov r10,r8
17018 + popm r4-r7,pc
17019 +
17020 +0:
17021 +	// both op1 and op2 zero, but sign unknown. the result sign is the AND of the signs.
17022 + ror r9 // pack op2 msw again with sign from c
17023 + lsl r12,1 // get back sign of op1 into c ...
17024 + ror r11 // and back in original op1
17025 + and r11,r9 // and sign bits. as op1 is zero, the
17026 + // only bit which can be 1 is sign bit
17027 + popm r4-r7,pc
17028 +1:
17029 + ror r12 // save op1 sign in msb of r12
17030 +
17031 + lsl r9,1 // unpack op2 msw
17032 + or r4,r8,r9
17033 + brne 0f
17034 + // op2 is zero, return op1
17035 + // whatever it is. the only case
17036 + // requiring special handling is if
17037 + // op1 is zero, but that was handled
17038 + // above.
17039 + lsl r12, 1
17040 + ror r11
17041 + popm r4-r7,pc
17042 +0:
17043 + // make sure that exp[op1] >= exp[op2]
17044 + cp.w r11,r9
17045 + brhs 0f
17046 +
17047 + mov r7,r11 // swap operands if op2 was larger
17048 + mov r6,r10
17049 + mov r11,r9
17050 + mov r10,r8
17051 + mov r9,r7
17052 + mov r8,r6
17053 +
17054 +0:
17055 + // check if op1 is nan or inf.
17056 + lddpc r5,.Linf
17057 + cp.w r11,r5
17058 + brlo 1f
17059 + /* Op 1 is nan or inf */
17060 +	// we have to check the low word as well, since the nan mantissa may be 0 in the msw
17061 + cpc r10
17062 + // we know that op1 is inf or nan. if z != 1 then we have nan.
17063 + // if we have nan, return nan.
17064 + breq 0f
17065 + mov r11, -1
17066 + rjmp __dfadd_return_op1
17067 +0:
17068 +
17069 +	// op1 is infinity. check if op2 is nan, infinity or a normal number.
17070 + cp.w r9,r5
17071 +	// Op2 is NaN or Inf. Return op2 but with the sign of the result.
17072 +	// If Op2 is NaN the sign doesn't matter, so there is no need to treat NaN separately.
17073 + movhs r11, r9
17074 + movhs r10, r8
17075 +
17076 +	// op2 can be infinity (of the same sign as op1) or nan;
17077 +	// in either case returning op2 gives the correct result.
17078 + rjmp __dfadd_return_op1
17079 +1:
17080 + // if op1 is not inf or nan, then op2 cannot be since exp[op1] >=
17081 + // exp[op2]
17082 +
17083 + // now prepare the operands by expanding them and shifting op2
17084 + // to the correct position for the add. note! if op2 is
17085 + // insignificant compared to op1, the function will take care of
17086 + // this and return op1 directly to the application.
17087 +
17088 + /* Unpack operands */
17089 + unpack_df r7 /* exp op1*/, r10, r11 /* Mantissa op1 */
17090 + unpack_df r6 /* exp op2*/, r8, r9 /* Mantissa op2 */
17091 +
17092 + /* Get shift amount required for aligning op1 and op2 */
17093 + rsub r6, r7
17094 + breq __perform_dfadd /* No shift needed */
17095 +
17096 + cp.w r6, 63
17097 + brhs __dfadd_pack_result /* Op 2 insignificant compared to op1 */
17098 +
17099 + /* Shift mantissa of op2 so that op1 and op2 are aligned */
17100 + scale_df r6 /* shift_count*/, r8, r9 /* Mantissa */, r4, r5 /*Scratch*/
17101 +
17102 +__perform_dfadd:
17103 + add r10,r8 // add mantissas
17104 + adc r11,r11,r9
17105 + brcc 0f
17106 + ror r11 // overflow => shift down mantissa
17107 + ror r10
17108 + brcc 1f // sticky bit shifted out?
17109 + sbr r10,0 // if so, merge it into result again
17110 +1:
17111 + sub r7,-1 // increase exponent with 1
17112 +0:
17113 + normalize_df r7 /*exp*/, r10, r11 /* mantissa */, r8, r9 /* scratch */
17114 +
17115 + /* Check if a subnormal result was created */
17116 + cp.w r7, 0
17117 + brgt 0f
17118 +
17119 + adjust_subnormal_df r7 /*exp*/, r10, r11 /* Mantissa */, r12 /*sign*/, r8, r9 /*scratch*/
17120 + popm r4-r7,pc
17121 +0:
17122 +
17123 + /* Round result */
17124 + round_df r7 /*exp*/, r10, r11 /* Mantissa */, r9 /*scratch*/
17125 + cp.w r7,0x7ff
17126 + brlt __dfadd_pack_result
17127 + /*Return infinity */
17128 + lddpc r11, .Linf
17129 + mov r10, 0
17130 + rjmp __dfadd_return_op1
17131 +
17132 +__dfadd_pack_result:
17133 + /* Pack */
17134 + pack_df r7 /*exp*/, r10, r11 /* mantissa */, r10, r11 /* Output df number*/
17135 +
17136 +__dfadd_return_op1:
17137 + lsl r12,1
17138 + ror r11
17139 + popm r4-r7,pc
17140 +#endif
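
The add/sub code above boils down to: align the smaller operand by the exponent difference (keeping a sticky bit), add the mantissas, and on carry shift back down while bumping the exponent. A rough C sketch of that core step on already unpacked magnitudes (illustrative names; the asm's cut-off of 63 for an insignificant op2 is kept) is:

#include <stdint.h>

/* Align-and-add step of __avr32_f64_add on unpacked magnitudes:
   mantissas left-aligned at bit 63, biased exponents, exp1 >= exp2. */
static void fp_add_core(int32_t *exp1, uint64_t *mant1,
                        int32_t exp2, uint64_t mant2)
{
    int shift = *exp1 - exp2;
    if (shift >= 63)
        return;                              /* op2 is treated as insignificant */
    if (shift > 0) {
        uint64_t lost = mant2 << (64 - shift);
        mant2 >>= shift;
        if (lost)
            mant2 |= 1;                      /* sticky bit */
    }
    uint64_t sum = *mant1 + mant2;
    if (sum < *mant1) {                      /* carry out of bit 63 */
        uint64_t sticky = sum & 1;           /* keep the bit shifted out */
        sum = (sum >> 1) | (1ull << 63) | sticky;
        *exp1 += 1;
    }
    *mant1 = sum;
}
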
17141 +
17142 +#ifdef L_avr32_f64_to_u32
17143 + /* This goes into L_fixdfsi */
17144 +#endif
17145 +
17146 +
17147 +#ifdef L_avr32_f64_to_s32
17148 + .global __avr32_f64_to_u32
17149 + .type __avr32_f64_to_u32,@function
17150 +__avr32_f64_to_u32:
17151 + cp.w r11, 0
17152 + retmi 0 /* Negative returns 0 */
17153 +#ifdef __LARGE_FLOATS__
17154 + lsl r12,r11,1
17155 + lsr r12,21 /* extract exponent*/
17156 + sub r12,1023 /* convert to unbiased exponent.*/
17157 + retlo 0 /* too small exponent implies zero. */
17158 + cp.w r12,32
17159 + brcc 0f
17160 + rjmp 1f
17161 +#endif
17162 +
17163 + /* Fallthrough to df to signed si conversion */
17164 + .global __avr32_f64_to_s32
17165 + .type __avr32_f64_to_s32,@function
17166 +__avr32_f64_to_s32:
17167 + lsl r12,r11,1
17168 + lsr r12,21 /* extract exponent*/
17169 + sub r12,1023 /* convert to unbiased exponent.*/
17170 + retlo 0 /* too small exponent implies zero. */
17171 +
17172 +#ifdef __LARGE_FLOATS__
17173 + cp.w r12,31
17174 + brcc 0f
17175 +#endif
17176 +1:
17177 + rsub r12,r12,31 /* shift count = 31 - exponent */
17178 + mov r9,r11 /* save sign for later...*/
17179 + lsl r11,11 /* remove exponent and sign*/
17180 + sbr r11,31 /* add implicit bit*/
17181 + or r11,r11,r10>>21 /* get rest of bits from lsw of double */
17182 + lsr r11,r11,r12 /* shift down mantissa to final place */
17183 + lsl r9,1 /* sign -> carry */
17184 + retcc r11 /* if positive, we are done */
17185 + neg r11 /* if negative float, negate result */
17186 + ret r11
17187 +
17188 +#ifdef __LARGE_FLOATS__
17189 +0:
17190 +	mov	r12,-1		/* r12 = 0xffffffff */
17191 +	lsr	r12,1		/* r12 = 0x7fffffff */
17192 +	lsl	r11,1		/* sign -> carry */
17193 +	acr	r12		/* r12 = signed ? 0x80000000
17194 + : 0x7fffffff */
17195 + ret r12
17196 +#endif
17197 +#endif /* L_fixdfsi*/
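
For the default configuration (no __LARGE_FLOATS__), the double-to-signed-int path above is equivalent to the following C sketch; the names are illustrative, and values that do not fit in 32 bits (unbiased exponent above 31) are assumed not to occur, since the asm only saturates them in the __LARGE_FLOATS__ variant:

#include <stdint.h>

/* hi/lo are the two words of the IEEE double. */
static int32_t f64_to_s32_c(uint32_t hi, uint32_t lo)
{
    int32_t exp = (int32_t)((hi << 1) >> 21) - 1023;       /* unbiased exponent */
    if (exp < 0)
        return 0;                                          /* |x| < 1 gives 0 */
    uint32_t mant = (hi << 11) | 0x80000000u | (lo >> 21); /* implicit bit + top lsw bits */
    uint32_t val  = mant >> (31 - exp);                    /* assumes 0 <= exp <= 31 */
    return (int32_t)((hi >> 31) ? 0u - val : val);         /* apply the sign */
}
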
17198 +
17199 +#ifdef L_avr32_f64_to_u64
17200 + /* Actual function is in L_fixdfdi */
17201 +#endif
17202 +
17203 +#ifdef L_avr32_f64_to_s64
17204 + .global __avr32_f64_to_u64
17205 + .type __avr32_f64_to_u64,@function
17206 +__avr32_f64_to_u64:
17207 + cp.w r11,0
17208 + /* Negative numbers return zero */
17209 + movmi r10, 0
17210 + movmi r11, 0
17211 + retmi r11
17212 +#ifdef __LARGE_FLOATS__
17213 + lsl r9,r11,1
17214 + lsr r9,21 /* get exponent*/
17215 + sub r9,1023 /* convert to correct range*/
17216 +	/* Return zero if exponent too small */
17217 + movlo r10, 0
17218 + movlo r11, 0
17219 + retlo r11
17220 + cp.w r9,64
17221 + mov r8,r11 /* save sign for later...*/
17222 + brcs 1f
17223 +	rjmp	2f	/* Number too large */
17224 +
17225 +#endif
17226 +
17227 +
17228 +
17229 + /* Fallthrough */
17230 + .global __avr32_f64_to_s64
17231 + .type __avr32_f64_to_s64,@function
17232 +__avr32_f64_to_s64:
17233 + lsl r9,r11,1
17234 + lsr r9,21 /* get exponent*/
17235 + sub r9,1023 /* convert to correct range*/
17236 +	/* Return zero if exponent too small */
17237 + movlo r10, 0
17238 + movlo r11, 0
17239 + retlo r11
17240 +
17241 +#ifdef __LARGE_FLOATS__
17242 + cp.w r9,63
17243 + mov r8,r11 /* save sign for later...*/
17244 + brcc 2f
17245 +#else
17246 + mov r8,r11 /* save sign for later...*/
17247 +#endif
17248 +1:
17249 + lsl r11,11 /* remove exponent */
17250 + sbr r11,31 /* add implicit bit*/
17251 + or r11,r11,r10>>21 /* get rest of bits from lsw of double*/
17252 + lsl r10,11 /* align lsw correctly as well */
17253 + rsub r9,r9,63 /* shift count = 63 - exponent */
17254 + breq 1f
17255 +
17256 + cp.w r9,32 /* is shift count more than one reg? */
17257 + brhs 0f
17258 +
17259 + mov r12,r11 /* save msw */
17260 + lsr r10,r10,r9 /* small shift count, shift down lsw */
17261 + lsr r11,r11,r9 /* small shift count, shift down msw */
17262 + rsub r9,r9,32 /* get 32-size of shifted out tail */
17263 + lsl r12,r12,r9 /* align part to move from msw to lsw */
17264 + or r10,r12 /* combine to get new lsw */
17265 + rjmp 1f
17266 +
17267 +0:
17268 +	lsr	r10,r11,r9	/* large shift count, only lsw gets bits
17269 + note that shift count is modulo 32*/
17270 + mov r11,0 /* msw will be 0 */
17271 +
17272 +1:
17273 + lsl r8,1 /* sign -> carry */
17274 + retcc r11 /* if positive, we are done */
17275 +
17276 + neg r11 /* if negative float, negate result */
17277 + neg r10
17278 + scr r11
17279 + ret r11
17280 +
17281 +
17282 +#ifdef __LARGE_FLOATS__
17283 +2:
17284 + mov r11,-1 /* r11 = 0xffffffff */
17285 + lsr r11,1 /* r11 = 0x7fffffff */
17286 + lsl r8,1 /* sign -> carry */
17287 + acr r11 /* r11 = signed ? 0x80000000 */
17288 + /* : 0x7fffffff */
17289 + lsl r10,r11,31 /* extend last bit of msw*/
17290 + asr r10,31
17291 + ret r11
17292 +#endif
17293 +#endif
17294 +
17295 +#ifdef L_avr32_u32_to_f64
17296 + /* Code located in L_floatsidf */
17297 +#endif
17298 +
17299 +#ifdef L_avr32_s32_to_f64
17300 + .global __avr32_u32_to_f64
17301 + .type __avr32_u32_to_f64,@function
17302 +__avr32_u32_to_f64:
17303 + sub r11, r12, 0 /* Move to r11 and force Z flag to be updated */
17304 + mov r12, 0 /* always positive */
17305 + rjmp 0f /* Jump to common code for floatsidf */
17306 +
17307 + .global __avr32_s32_to_f64
17308 + .type __avr32_s32_to_f64,@function
17309 +__avr32_s32_to_f64:
17310 + mov r11, r12 /* Keep original value in r12 for sign */
17311 +	abs	r11		/* Absolute value of r12 */
17312 +0:
17313 + mov r10,0 /* let remaining bits be zero */
17314 + reteq r11 /* zero long will return zero float */
17315 +
17316 + pushm lr
17317 + mov r9,31+1023 /* set exponent */
17318 +
17319 + normalize_df r9 /*exp*/, r10, r11 /* mantissa */, r8, lr /* scratch */
17320 +
17321 + /* Check if a subnormal result was created */
17322 + cp.w r9, 0
17323 + brgt 0f
17324 +
17325 + adjust_subnormal_df r9 /* exp */, r10, r11 /* Mantissa */, r12 /*sign*/, r8, lr /* scratch */
17326 + popm pc
17327 +0:
17328 +
17329 + /* Round result */
17330 + round_df r9 /*exp*/, r10, r11 /* Mantissa */, r8 /*scratch*/
17331 + cp.w r9,0x7ff
17332 + brlt 0f
17333 + /*Return infinity */
17334 + lddpc r11, .Linf
17335 + mov r10, 0
17336 + rjmp __floatsidf_return_op1
17337 +
17338 +0:
17339 +
17340 + /* Pack */
17341 + pack_df r9 /*exp*/, r10, r11 /* mantissa */, r10, r11 /* Output df number*/
17342 +__floatsidf_return_op1:
17343 + lsl r12,1 /* shift in sign bit */
17344 + ror r11
17345 +
17346 + popm pc
17347 +#endif
17348 +
17349 +
17350 +#ifdef L_avr32_f32_cmp_eq
17351 + .global __avr32_f32_cmp_eq
17352 + .type __avr32_f32_cmp_eq,@function
17353 +__avr32_f32_cmp_eq:
17354 + cp.w r12, r11
17355 + brne 0f /* If not equal check for +/-0 */
17356 +
17357 + /* Check for NaN or Inf */
17358 + lddpc r11,.Linf_sf
17359 + lsl r12, 1
17360 + cp.w r12, r11
17361 + srls r12 /* 0 if NaN, 1 otherwise */
17362 + ret r12
17363 +0:
17364 + /* Or together the two values and shift out the sign bit.
17365 + If the result is zero, then the two values are both zero. */
17366 + or r12, r11
17367 + lsl r12, 1
17368 + sreq r12
17369 + ret r12
17370 +#endif
17371 +
17372 +#if defined(L_avr32_f32_cmp_ge) || defined(L_avr32_f32_cmp_lt)
17373 +#ifdef L_avr32_f32_cmp_ge
17374 + .global __avr32_f32_cmp_ge
17375 + .type __avr32_f32_cmp_ge,@function
17376 +__avr32_f32_cmp_ge:
17377 +#endif
17378 +#ifdef L_avr32_f32_cmp_lt
17379 + .global __avr32_f32_cmp_lt
17380 + .type __avr32_f32_cmp_lt,@function
17381 +__avr32_f32_cmp_lt:
17382 +#endif
17383 + lsl r10, r12, 1 /* Remove sign bits */
17384 + lsl r9, r11, 1
17385 + lddpc r8, .Linf_sf
17386 + cp.w r10, r8
17387 + rethi 0 /* Op0 is NaN */
17388 + cp.w r9, r8
17389 +	rethi	0	/* Op1 is NaN */
17390 +
17391 + eor r8, r11, r12
17392 + bld r12, 31
17393 +#ifdef L_avr32_f32_cmp_ge
17394 + srcc r8 /* Set result to true if op0 is positive*/
17395 +#endif
17396 +#ifdef L_avr32_f32_cmp_lt
17397 + srcs r8 /* Set result to true if op0 is negative*/
17398 +#endif
17399 + retmi r8 /* Return if signs are different */
17400 + brcs 0f /* Both signs negative? */
17401 +
17402 + /* Both signs positive */
17403 + cp.w r12, r11
17404 +#ifdef L_avr32_f32_cmp_ge
17405 + srhs r12
17406 +#endif
17407 +#ifdef L_avr32_f32_cmp_lt
17408 + srlo r12
17409 +#endif
17410 + retal r12
17411 +0:
17412 + /* Both signs negative */
17413 + cp.w r11, r12
17414 +#ifdef L_avr32_f32_cmp_ge
17415 + srhs r12
17416 +#endif
17417 +#ifdef L_avr32_f32_cmp_lt
17418 + srlo r12
17419 +#endif
17420 + retal r12
17421 +#endif
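
The single-float compare above never touches the FPU: it rejects NaNs, decides by sign when the signs differ, and otherwise compares the bit patterns as unsigned magnitudes (reversed when both operands are negative). The less-than variant corresponds to roughly this C, with an illustrative name and the same shortcuts as the asm:

#include <stdint.h>

/* 1 if a < b, 0 otherwise (and 0 whenever either operand is NaN),
   operating on the raw single-precision bit patterns. */
static int f32_cmp_lt_c(uint32_t a, uint32_t b)
{
    uint32_t mag_a = a << 1, mag_b = b << 1;      /* drop the sign bits */
    if (mag_a > 0xff000000u || mag_b > 0xff000000u)
        return 0;                                 /* NaN involved */
    if ((a ^ b) >> 31)                            /* different signs */
        return a >> 31;                           /* true iff a is the negative one */
    if (a >> 31)
        return mag_b < mag_a;                     /* both negative: order reversed */
    return mag_a < mag_b;                         /* both positive */
}
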
17422 +
17423 +
17424 +#ifdef L_avr32_f64_cmp_eq
17425 + .global __avr32_f64_cmp_eq
17426 + .type __avr32_f64_cmp_eq,@function
17427 +__avr32_f64_cmp_eq:
17428 + cp.w r10,r8
17429 + cpc r11,r9
17430 + brne 0f /* Both args could be zero with different sign bits */
17431 +
17432 + /* check for NaN */
17433 + lsl r11,1
17434 + lddpc r12,.Linf
17435 + cp.w r10,0
17436 + cpc r11,r12 /* check if nan or inf */
17437 + srls r12 /* If Arg is NaN return 0 else 1*/
17438 + ret r12 /* Return */
17439 +
17440 +0:
17441 + lsl r11,1 /* get rid of sign bits */
17442 + lsl r9,1
17443 + or r11,r10 /* Check if all bits are zero */
17444 + or r11,r9
17445 + or r11,r8
17446 + sreq r12 /* If all zeros the arguments are equal
17447 + so return 1 else return 0 */
17448 + ret r12
17449 +#endif
17450 +
17451 +
17452 +#if defined(L_avr32_f64_cmp_ge) || defined(L_avr32_f64_cmp_lt)
17453 +
17454 +#ifdef L_avr32_f64_cmp_ge
17455 + .global __avr32_f64_cmp_ge
17456 + .type __avr32_f64_cmp_ge,@function
17457 +__avr32_f64_cmp_ge:
17458 +#endif
17459 +#ifdef L_avr32_f64_cmp_lt
17460 + .global __avr32_f64_cmp_lt
17461 + .type __avr32_f64_cmp_lt,@function
17462 +__avr32_f64_cmp_lt:
17463 +#endif
17464 +
17465 + /* compare magnitude of op1 and op2 */
17466 + pushm lr
17467 +
17468 + lsl r11,1 /* Remove sign bit of op1 */
17469 + srcs lr /* Sign op1 to lsb of lr*/
17470 + lsl r9,1 /* Remove sign bit of op2 */
17471 + rol lr /* Sign op2 to lsb of lr, sign bit op1 bit 1 of lr*/
17472 +
17473 + /* Check for Nan */
17474 + lddpc r12,.Linf
17475 + cp.w r10,0
17476 + cpc r11,r12
17477 + movhi r12, 0 /* Return false for NaN */
17478 + brhi 0f /* We have NaN */
17479 + cp.w r8,0
17480 + cpc r9,r12
17481 + movhi r12, 0 /* Return false for NaN */
17482 + brhi 0f /* We have NaN */
17483 +
17484 + cp.w lr,3 /* both operands negative ?*/
17485 + breq 1f
17486 +
17487 + cp.w lr,1 /* both operands positive? */
17488 + brlo 2f
17489 +
17490 + /* Different signs. If sign of op1 is negative the difference
17491 + between op1 and op2 will always be negative, and if op1 is
17492 + positive the difference will always be positive */
17493 +#ifdef L_avr32_f64_cmp_ge
17494 + sreq r12
17495 +#endif
17496 +#ifdef L_avr32_f64_cmp_lt
17497 + srne r12
17498 +#endif
17499 + popm pc
17500 +
17501 +
17502 +2:
17503 + /* Both operands positive. Just compute the difference */
17504 + cp.w r10,r8
17505 + cpc r11,r9
17506 +#ifdef L_avr32_f64_cmp_ge
17507 + srhs r12
17508 +#endif
17509 +#ifdef L_avr32_f64_cmp_lt
17510 + srlo r12
17511 +#endif
17512 + popm pc
17513 +
17514 +1:
17515 + /* Both operands negative. Compute the difference with operands switched */
17516 + cp r8,r10
17517 + cpc r9,r11
17518 +#ifdef L_avr32_f64_cmp_ge
17519 + srhs r12
17520 +#endif
17521 +#ifdef L_avr32_f64_cmp_lt
17522 + srlo r12
17523 +#endif
17524 +0:
17525 + popm pc
17526 +#endif
17527 +
17528 +
17529 +
17530 +#ifdef L_avr32_f64_div
17531 + .global __avr32_f64_div
17532 + .type __avr32_f64_div,@function
17533 +__avr32_f64_div:
17534 + stm --sp, r2-r7,lr
17535 + eor r12, r11, r9 /* Sign(op1) ^ Sign(op2) to msb of r12*/
17536 + lsl r11,1 /* unpack op1*/
17537 + lddpc lr,.Linf
17538 + lsl r9,1 /* unpack op2*/
17539 +
17540 + cp.w r11,lr
17541 + brhs 0f /* op1 is NaN or infinity */
17542 + cp.w r9,lr
17543 + brhs 1f /* op2 is NaN or infinity */
17544 + or r5,r9,r8
17545 + breq 2f /* op2 is zero */
17546 + or r5,r11,r10
17547 + breq __dfdiv_return_op1 /* op1 is zero return zero*/
17548 +
17549 + /* Unpack and normalize */
17550 + /* op1 */
17551 + unpack_df r7 /*exp*/, r10, r11 /*df number*/
17552 + normalize_df r7 /*exp*/, r10, r11 /*Mantissa*/, r4, r5 /*scratch*/
17553 +
17554 +	/* op2 */
17555 + unpack_df r6 /*exp*/, r8, r9 /*df number*/
17556 + normalize_df r6 /*exp*/, r8, r9 /*Mantissa*/, r4, r5 /*scratch*/
17557 +
17558 + /* Compute new exponent */
17559 + sub r7,r6
17560 + sub r7,-1023
17561 +
17562 + /* Do fixed point division of mantissas*/
17563 + mov r6,55
17564 + lsr r11,1
17565 + ror r10
17566 + lsr r9,1
17567 + ror r8
17568 +
17569 +3:
17570 +	/* Check if dividend is higher than or equal to divisor */
17571 + sub r2,r10,r8
17572 + sbc r3,r11,r9
17573 + /* If so move the difference back into the dividend */
17574 + movhs r10, r2
17575 + movhs r11, r3
17576 + /* Update the Quotient */
17577 + rol r4
17578 + rol r5
17579 + eorl r4,1
17580 +
17581 + /* Shift the dividend */
17582 + lsl r10,1
17583 + rol r11
17584 +
17585 + sub r6,1
17586 + brne 3b
17587 +
17588 +	/* Check if we have a remainder which will then propagate into
17589 + the last bit */
17590 +
17591 + or r11,r11,r10
17592 + neg r11
17593 + rol r4
17594 + rol r5
17595 +
17596 + /* Adjust mantissa into correct alignment */
17597 + lsl r11, r5,(64-56)
17598 + or r11,r11,r4>>(32-64+56)
17599 + lsl r10,r4, (64-56)
17600 +
17601 + /* Normalize result */
17602 + normalize_df r7 /*exp*/, r10, r11 /* mantissa */, r8, r9 /* scratch */
17603 +
17604 + /* Check if a subnormal result was created */
17605 + cp.w r7, 0
17606 + brgt 3f
17607 +
17608 + adjust_subnormal_df r7 /*exp*/, r10, r11 /* Mantissa */, r12 /*sign*/, r8, r9 /*scratch*/
17609 + ldm sp++, r2-r7,pc
17610 +3:
17611 +
17612 + /* Round result */
17613 + round_df r7 /*exp*/, r10, r11 /* Mantissa */, r9 /*scratch*/
17614 + cp.w r7,0x7ff
17615 + brlt __dfdiv_pack_result
17616 + /*Return infinity */
17617 + lddpc r11, .Linf
17618 + mov r10, 0
17619 + rjmp __dfdiv_return_op1
17620 +
17621 +__dfdiv_pack_result:
17622 + /* Pack */
17623 + pack_df r7 /*exp*/, r10, r11 /* mantissa */, r10, r11 /* Output df number*/
17624 +
17625 +__dfdiv_return_op1:
17626 + lsl r12,1
17627 + ror r11
17628 + ldm sp++, r2-r7,pc
17629 +
17630 +0:
17631 + /* Op1 is NaN or Inf */
17632 + cpc r10
17633 +	/* If op1 is a NaN then we should return a NaN */
17634 + brne __dfdiv_return_op1
17635 +
17636 + /* Op1 is infinity, check op2*/
17637 + cp.w r9,lr
17638 + brlo __dfdiv_return_op1 /* Op2 is a normal number return inf */
17639 + /* Other combinations: return NaN */
17640 + mov r11, -1
17641 + ldm sp++, r2-r7,pc
17642 +
17643 +1:
17644 + /* Op2 is NaN or Inf */
17645 + cpc r8
17646 + /* If inf return zero else return NaN*/
17647 + mov r10, 0
17648 + moveq r11, 0
17649 + movne r11, -1
17650 + ldm sp++, r2-r7,pc
17651 +
17652 +2:
17653 + /* Op2 is zero */
17654 + or r6,r11,r10 /* 0.0/0.0 yields NaN */
17655 + mov r10, 0
17656 + moveq r11, -1 /* Return NaN */
17657 + movne r11, lr /* Return inf */
17658 + rjmp __dfdiv_return_op1
17659 +
17660 +#endif
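
The quotient loop in __avr32_f64_div is a plain restoring shift-and-subtract division of the two (pre-shifted) mantissas, 55 iterations plus a final sticky bit derived from any remainder. As a C sketch with illustrative names:

#include <stdint.h>

/* 'num' and 'den' are the normalized mantissas, both already shifted
   right one bit as in the asm so the loop cannot overflow; the quotient
   comes out with 55 significant bits plus a sticky LSB. */
static uint64_t mant_div_c(uint64_t num, uint64_t den)
{
    uint64_t quot = 0;
    for (int i = 0; i < 55; i++) {
        quot <<= 1;
        if (num >= den) {        /* quotient bit is 1 when the subtract succeeds */
            num -= den;
            quot |= 1;
        }
        num <<= 1;               /* shift the dividend for the next bit */
    }
    quot <<= 1;
    if (num != 0)
        quot |= 1;               /* sticky bit: the remainder would affect rounding */
    return quot;
}
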
17661 +
17662 +
17663 +#ifdef L_avr32_f32_div
17664 + .global __avr32_f32_div
17665 + .type __avr32_f32_div,@function
17666 +__avr32_f32_div:
17667 + eor r8, r11, r12 /* MSB(r8) = Sign(op1) ^ Sign(op2) */
17668 + /* Unpack */
17669 + lsl r12,1
17670 + reteq 0 /* Return zero if op1 is zero */
17671 + lddpc r9, .Linf_sf
17672 + lsl r11,1
17673 +
17674 + /* Check op1 for NaN or Inf */
17675 + cp r12,r9
17676 + brhs 2f
17677 +
17678 + /* Check op2 for NaN or Inf */
17679 + cp r11,r9
17680 + brhs 3f
17681 + /* Check op2 for zero */
17682 + tst r11,r11
17683 + breq 4f
17684 +
17685 + /* If op1 is zero return zero */
17686 + tst r12, r12
17687 + reteq 0
17688 +
17689 + /* Unpack op1*/
17690 + unpack_sf r9 /*exp*/, r12 /*sf*/
17691 +
17692 + /* Unpack op2*/
17693 + unpack_sf r10 /*exp*/, r11 /*sf*/
17694 +
17695 + /* Calculate new exponent */
17696 + stm --sp,r7,lr
17697 + sub r9, r10
17698 + sub r9,-127
17699 +
17700 + /* Divide */
17701 + mov r7,26
17702 +
17703 + lsr r12,1 /* Make room for one more bit in mantissas */
17704 + lsr r11,1
17705 +
17706 +0:
17707 + sub r10,r12,r11
17708 + movcc r12, r10 /* update dividend if divisor smaller */
17709 + rol lr /* shift result into lr */
17710 + eorl lr,1 /* flip bit. */
17711 + lsl r12,1 /* Shift dividend */
17712 + sub r7,1
17713 + brne 0b
17714 +
17715 + /* round and scale*/
17716 + neg r12 /* c = 1 iff r12 != 0 */
17717 + rol lr
17718 + lsl r10,lr,(32-27) /* Adjust mantissa */
17719 + ldm sp++, r7, lr
17720 +
17721 +
17722 + normalize_sf r9 /*exp*/, r10 /*mant*/, r11 /*scratch*/
17723 +
17724 + /* Check for subnormal result */
17725 + cp.w r9, 0
17726 + brgt 0f
17727 +
17728 + /* Adjust a subnormal result */
17729 + adjust_subnormal_sf r12 /*sf*/, r9 /*exp*/, r10 /*mant*/, r8 /*sign*/,r11 /*scratch*/
17730 + ret r12
17731 +0:
17732 + round_sf r9 /*exp*/, r10 /*mant*/, r11 /*scratch*/
17733 + pack_sf r12 /*sf*/, r9 /*exp*/, r10 /*mant*/
17734 +__divsf_return_op1:
17735 + lsl r8, 1
17736 + ror r12
17737 + ret r12
17738 +
17739 +2:
17740 + /* Op1 is NaN or inf */
17741 + retne -1 /* Return NaN if op1 is NaN */
17742 + /* Op1 is inf check op2 */
17743 + cp r11, r9
17744 + brlo __divsf_return_op1 /* inf/number gives inf */
17745 + ret -1 /* The rest gives NaN*/
17746 +3:
17747 +	/* Op2 is NaN or inf */
17748 + reteq 0 /* Return zero if number/inf*/
17749 + ret -1 /* Return NaN*/
17750 +4:
17751 + /* Op2 is zero ? */
17752 + tst r12,r12
17753 + reteq -1 /* 0.0/0.0 is NaN */
17754 + lddpc r12, .Linf_sf
17755 + rjmp __divsf_return_op1
17756 +
17757 +#endif
17758 +
17759 +#ifdef L_avr32_f32_mul
17760 + .global __avr32_f32_mul
17761 + .type __avr32_f32_mul,@function
17762 +__avr32_f32_mul:
17763 + eor r8, r11, r12 /* MSB(r8) = Sign(op1) ^ Sign(op2) */
17764 + lsl r12,1 /* unpack op1 */
17765 + lsl r11,1 /* unpack op2 */
17766 +
17767 +	/* arrange operands so that op1 >= op2 */
17768 + sub r9,r12,r11
17769 + brcc 0f
17770 +
17771 + sub r12,r9 /* swap operands if op2 was larger */
17772 + add r11,r9
17773 +
17774 +0:
17775 + lddpc r9,.Linf_sf
17776 + cp r12,r9
17777 + brhs 2f
17778 +
17779 + /* Check op2 for zero */
17780 + tst r11,r11
17781 + reteq 0 /* Return zero */
17782 +
17783 + /* Unpack op1 */
17784 + unpack_sf r9 /*exp*/, r12 /*sf*/
17785 + /* Unpack op2 */
17786 + unpack_sf r10 /*exp*/, r11 /*sf*/
17787 +
17788 + /* Calculate new exponent */
17789 + add r9,r10
17790 +
17791 + /* Do the multiplication */
17792 + mulu.d r10,r12,r11
17793 +
17794 + sub r9,(127-1) /* remove extra exponent bias */
17795 +
17796 + /* Check if we have any bits in r10 which
17797 + means a rounding bit should be inserted in LSB of result */
17798 + tst r10,r10
17799 + srne r10
17800 + or r12,r11,r10
17801 +
17802 + /* Normalize */
17803 + normalize_sf r9 /*exp*/, r12 /*mant*/, r11 /*scratch*/
17804 +
17805 + /* Check for subnormal result */
17806 + cp.w r9, 0
17807 + brgt 0f
17808 +
17809 + /* Adjust a subnormal result */
17810 + adjust_subnormal_sf r12/*sf*/, r9 /*exp*/, r12 /*mant*/, r8 /*sign*/, r11 /*scratch */
17811 + ret r12
17812 +0:
17813 + round_sf r9 /*exp*/, r12 /*mant*/, r11 /*scratch*/
17814 + cp.w r9, 0xff
17815 + brlo 1f
17816 + lddpc r12,.Linf_sf
17817 + rjmp __mulsf_return_op1
17818 +1:
17819 + pack_sf r12 /*sf*/, r9 /*exp*/, r12 /*mant*/
17820 +__mulsf_return_op1:
17821 + lsl r8, 1
17822 + ror r12
17823 + ret r12
17824 +
17825 +2:
17826 + /* Op1 is inf or NaN */
17827 + retne -1 /* Op1 is NaN return NaN */
17828 +
17829 + /* Op1 is inf and op2 is smaller so it is either infinity
17830 +	   or a (sub-)normal number */
17831 + cp r11,0
17832 + brne __mulsf_return_op1 /* op2 is not zero. return op1.*/
17833 + ret -1 /* inf * 0 return NaN */
17834 +#endif
17835 +
17836 +
17837 +#ifdef L_avr32_s32_to_f32
17838 + .global __avr32_s32_to_f32
17839 + .type __avr32_s32_to_f32,@function
17840 +__avr32_s32_to_f32:
17841 + cp r12, 0
17842 + reteq r12 /* If zero then return zero float */
17843 + mov r11, r12 /* Keep the sign */
17844 + abs r12 /* Compute the absolute value */
17845 + mov r10, 31 + 127 /* Set the correct exponent */
17846 +
17847 + /* Normalize */
17848 + normalize_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
17849 +
17850 + /* Check for subnormal result */
17851 + cp.w r10, 0
17852 + brgt 0f
17853 +
17854 + /* Adjust a subnormal result */
17855 + adjust_subnormal_sf r12/*sf*/, r10 /*exp*/, r12 /*mant*/, r11/*sign*/, r9 /*scratch*/
17856 + ret r12
17857 +0:
17858 + round_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
17859 + pack_sf r12 /*sf*/, r10 /*exp*/, r12 /*mant*/
17860 +__floatsisf_return_op1:
17861 + lsl r11, 1
17862 + ror r12
17863 + ret r12
17864 +#endif
17865 +
17866 +#ifdef L_avr32_u32_to_f32
17867 + .global __avr32_u32_to_f32
17868 + .type __avr32_u32_to_f32,@function
17869 +__avr32_u32_to_f32:
17870 + cp r12, 0
17871 + reteq r12 /* If zero then return zero float */
17872 + mov r10, 31 + 127 /* Set the correct exponent */
17873 +
17874 + /* Normalize */
17875 + normalize_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
17876 +
17877 + /* Check for subnormal result */
17878 + cp.w r10, 0
17879 + brgt 0f
17880 +
17881 + /* Adjust a subnormal result */
17882 + mov r8, 0
17883 + adjust_subnormal_sf r12/*sf*/,r10 /*exp*/, r12 /*mant*/,r8/*sign*/, r9 /*scratch*/
17884 + ret r12
17885 +0:
17886 + round_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
17887 + pack_sf r12 /*sf*/, r10 /*exp*/, r12 /*mant*/
17888 +__floatunsisf_return_op1:
17889 + lsr r12,1 /* Sign bit is 0 for unsigned int */
17890 + ret r12
17891 +#endif
17892 +
17893 +
17894 +#ifdef L_avr32_f32_to_s32
17895 + .global __avr32_f32_to_s32
17896 + .type __avr32_f32_to_s32,@function
17897 +__avr32_f32_to_s32:
17898 + lsr r11,r12,23 /* Extract exponent */
17899 + castu.b r11
17900 + sub r11,127 /* Fix bias */
17901 + retlo 0 /* Negative exponent yields zero integer */
17902 +
17903 +#ifdef __IEEE_LARGE_FLOATS__
17904 + cp r11,31
17905 + brcc 0f
17906 +#endif
17907 + /* Shift mantissa into correct position */
17908 + rsub r11,r11,31 /* Shift amount */
17909 + lsl r10,r12,8 /* Get mantissa */
17910 + sbr r10,31 /* Add implicit bit */
17911 + lsr r10,r10,r11 /* Perform shift */
17912 + lsl r12,1 /* Check sign */
17913 + retcc r10 /* if positive, we are done */
17914 + neg r10 /* if negative float, negate result */
17915 + ret r10
17916 +
17917 +#ifdef __IEEE_LARGE_FLOATS__
17918 +0:
17919 + mov r11,-1
17920 + lsr r11,1
17921 + lsl r12,1
17922 + acr r11
17923 +
17924 + ret r11
17925 +#endif
17926 +#endif
17927 +
17928 +#ifdef L_avr32_f32_to_u32
17929 + .global __avr32_f32_to_u32
17930 + .type __avr32_f32_to_u32,@function
17931 +__avr32_f32_to_u32:
17932 + cp r12,0
17933 +	retmi	0	/* Negative numbers give 0 */
17934 + bfextu r11, r12, 23, 8 /* Extract exponent */
17935 + sub r11,127 /* Fix bias */
17936 + retlo 0 /* Negative exponent yields zero integer */
17937 +
17938 +#ifdef __IEEE_LARGE_FLOATS__
17939 + cp r11,32
17940 + brcc 0f
17941 +#endif
17942 + /* Shift mantissa into correct position */
17943 + rsub r11,r11,31 /* Shift amount */
17944 + lsl r12,8 /* Get mantissa */
17945 + sbr r12,31 /* Add implicit bit */
17946 + lsr r12,r12,r11 /* Perform shift */
17947 + ret r12
17948 +
17949 +#ifdef __IEEE_LARGE_FLOATS__
17950 +0:
17951 + mov r11,-1
17952 + lsr r11,1
17953 + lsl r12,1
17954 + acr r11
17955 +
17956 + ret r11
17957 +#endif
17958 +#endif
17959 +
17960 +#ifdef L_avr32_f32_to_f64
17961 + .global __avr32_f32_to_f64
17962 + .type __avr32_f32_to_f64,@function
17963 +
17964 +__avr32_f32_to_f64:
17965 + lsl r11,r12,1 /* Remove sign bit, keep original value in r12*/
17966 + moveq r10, 0
17967 + reteq r11 /* Return zero if input is zero */
17968 +
17969 + bfextu r9,r11,24,8 /* Get exponent */
17970 + cp.w r9,0xff /* check for NaN or inf */
17971 + breq 0f
17972 +
17973 + lsl r11,7 /* Convert sf mantissa to df format */
17974 + mov r10,0
17975 +
17976 + /* Check if implicit bit should be set */
17977 + cp.w r9, 0
17978 + subeq r9,-1 /* Adjust exponent if it was 0 */
17979 + srne r8
17980 + or r11, r11, r8 << 31 /* Set implicit bit if needed */
17981 + sub r9,(127-0x3ff) /* Convert exponent to df format exponent */
17982 +
17983 + pushm lr
17984 + normalize_df r9 /*exp*/, r10, r11 /*mantissa*/, r8, lr /*scratch*/
17985 + popm lr
17986 + pack_df r9 /*exp*/, r10, r11 /*mantissa*/, r10, r11 /*df*/
17987 +
17988 +__extendsfdf_return_op1:
17989 + /* Rotate in sign bit */
17990 + lsl r12, 1
17991 + ror r11
17992 + ret r11
17993 +
17994 +0:
17995 + /* Inf or NaN*/
17996 + lddpc r10, .Linf
17997 + lsl r11,8 /* check mantissa */
17998 + movne r11, -1 /* Return NaN */
17999 + moveq r11, r10 /* Return inf */
18000 + rjmp __extendsfdf_return_op1
18001 +#endif
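
For a normal, non-zero input the widening conversion above amounts to rebasing the exponent from bias 127 to bias 1023 and moving the 23 fraction bits up into the 52-bit field; in C (illustrative name, zero/Inf/NaN and subnormal inputs excluded, result returned as one 64-bit word):

#include <stdint.h>

/* Widen a normal single-precision bit pattern to double precision. */
static uint64_t f32_to_f64_c(uint32_t sf)
{
    uint64_t sign = (uint64_t)(sf >> 31) << 63;
    uint64_t exp  = (uint64_t)((sf >> 23) & 0xff) + (1023 - 127); /* rebias exponent */
    uint64_t frac = (uint64_t)(sf & 0x7fffff) << 29;              /* 23 -> 52 fraction bits */
    return sign | (exp << 52) | frac;
}
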
18002 +
18003 +
18004 +#ifdef L_avr32_f64_to_f32
18005 + .global __avr32_f64_to_f32
18006 + .type __avr32_f64_to_f32,@function
18007 +
18008 +__avr32_f64_to_f32:
18009 + /* Unpack */
18010 + lsl r9,r11,1 /* Unpack exponent */
18011 + lsr r9,21
18012 +
18013 + reteq 0 /* If exponent is 0 the number is so small
18014 + that the conversion to single float gives
18015 + zero */
18016 +
18017 + lsl r8,r11,10 /* Adjust mantissa */
18018 + or r12,r8,r10>>22
18019 +
18020 + lsl r10,10 /* Check if there are any remaining bits
18021 + in the low part of the mantissa.*/
18022 + neg r10
18023 + rol r12 /* If there were remaining bits then set lsb
18024 + of mantissa to 1 */
18025 +
18026 + cp r9,0x7ff
18027 + breq 2f /* Check for NaN or inf */
18028 +
18029 + sub r9,(0x3ff-127) /* Adjust bias of exponent */
18030 + sbr r12,31 /* set the implicit bit.*/
18031 +
18032 + cp.w r9, 0 /* Check for subnormal number */
18033 + brgt 0f
18034 +
18035 + /* Adjust a subnormal result */
18036 + adjust_subnormal_sf r12/*sf*/,r9 /*exp*/, r12 /*mant*/, r11/*sign*/, r10 /*scratch*/
18037 + ret r12
18038 +0:
18039 + round_sf r9 /*exp*/, r12 /*mant*/, r10 /*scratch*/
18040 + pack_sf r12 /*sf*/, r9 /*exp*/, r12 /*mant*/
18041 +__truncdfsf_return_op1:
18042 + /* Rotate in sign bit */
18043 + lsl r11, 1
18044 + ror r12
18045 + ret r12
18046 +
18047 +
18048 +2:
18049 + /* NaN or inf */
18050 + cbr r12,31 /* clear implicit bit */
18051 + retne -1 /* Return NaN if mantissa not zero */
18052 + lddpc r12,.Linf_sf
18053 + ret r12 /* Return inf */
18054 +#endif
18055 +
18056 +
18057 + .align 2
18058 +.Linf:
18059 + .long 0xffe00000
18060 +
18061 + .align 2
18062 +.Linf_sf:
18063 + .long 0xff000000
18064 +
18065 diff -Nur gcc-4.1.2/gcc/config/avr32/lib2funcs.S gcc-4.1.2-owrt/gcc/config/avr32/lib2funcs.S
18066 --- gcc-4.1.2/gcc/config/avr32/lib2funcs.S 1970-01-01 01:00:00.000000000 +0100
18067 +++ gcc-4.1.2-owrt/gcc/config/avr32/lib2funcs.S 2007-05-24 12:03:28.000000000 +0200
18068 @@ -0,0 +1,21 @@
18069 + .align 4
18070 + .global __nonlocal_goto
18071 + .type __nonlocal_goto,@function
18072 +
18073 +/* __nonlocal_goto: This function handles nonlocal_goto's in gcc.
18074 +
18075 + parameter 0 (r12) = New Frame Pointer
18076 + parameter 1 (r11) = Address to goto
18077 + parameter 2 (r10) = New Stack Pointer
18078 +
18079 + This function invalidates the return stack, since it returns from a
18080 + function without using a return instruction.
18081 +*/
18082 +__nonlocal_goto:
18083 + mov r7, r12
18084 + mov sp, r10
18085 + frs # Flush return stack
18086 + mov pc, r11
18087 +
18088 +
18089 +
18090 diff -Nur gcc-4.1.2/gcc/config/avr32/linux-elf.h gcc-4.1.2-owrt/gcc/config/avr32/linux-elf.h
18091 --- gcc-4.1.2/gcc/config/avr32/linux-elf.h 1970-01-01 01:00:00.000000000 +0100
18092 +++ gcc-4.1.2-owrt/gcc/config/avr32/linux-elf.h 2007-05-24 12:03:28.000000000 +0200
18093 @@ -0,0 +1,154 @@
18094 +/*
18095 + Linux/Elf specific definitions.
18096 + Copyright 2003-2006 Atmel Corporation.
18097 +
18098 + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
18099 + and Håvard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com>
18100 +
18101 + This file is part of GCC.
18102 +
18103 + This program is free software; you can redistribute it and/or modify
18104 + it under the terms of the GNU General Public License as published by
18105 + the Free Software Foundation; either version 2 of the License, or
18106 + (at your option) any later version.
18107 +
18108 + This program is distributed in the hope that it will be useful,
18109 + but WITHOUT ANY WARRANTY; without even the implied warranty of
18110 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18111 + GNU General Public License for more details.
18112 +
18113 + You should have received a copy of the GNU General Public License
18114 + along with this program; if not, write to the Free Software
18115 + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
18116 +
18117 +
18118 +
18119 +/* elfos.h should have already been included. Now just override
18120 + any conflicting definitions and add any extras. */
18121 +
18122 +/* Run-time Target Specification. */
18123 +#undef TARGET_VERSION
18124 +#define TARGET_VERSION fputs (" (AVR32 GNU/Linux with ELF)", stderr);
18125 +
18126 +/* Do not assume anything about header files. */
18127 +#define NO_IMPLICIT_EXTERN_C
18128 +
18129 +/* The GNU C++ standard library requires that these macros be defined. */
18130 +#undef CPLUSPLUS_CPP_SPEC
18131 +#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)"
18132 +
18133 +/* Now we define the strings used to build the spec file. */
18134 +#undef LIB_SPEC
18135 +#define LIB_SPEC \
18136 + "%{pthread:-lpthread} \
18137 + %{shared:-lc} \
18138 + %{!shared:%{profile:-lc_p}%{!profile:-lc}}"
18139 +
18140 +/* Provide a STARTFILE_SPEC appropriate for GNU/Linux. Here we add
18141 + the GNU/Linux magical crtbegin.o file (see crtstuff.c) which
18142 + provides part of the support for getting C++ file-scope static
18143 + object constructed before entering `main'. */
18144 +
18145 +#undef STARTFILE_SPEC
18146 +#define STARTFILE_SPEC \
18147 + "%{!shared: \
18148 + %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} \
18149 + %{!p:%{profile:gcrt1.o%s} \
18150 + %{!profile:crt1.o%s}}}} \
18151 + crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
18152 +
18153 +/* Provide a ENDFILE_SPEC appropriate for GNU/Linux. Here we tack on
18154 + the GNU/Linux magical crtend.o file (see crtstuff.c) which
18155 + provides part of the support for getting C++ file-scope static
18156 + object constructed before entering `main', followed by a normal
18157 + GNU/Linux "finalizer" file, `crtn.o'. */
18158 +
18159 +#undef ENDFILE_SPEC
18160 +#define ENDFILE_SPEC \
18161 + "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
18162 +
18163 +#undef ASM_SPEC
18164 +#define ASM_SPEC "%{!mno-pic:--pic} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{mcpu=*:-mcpu=%*}"
18165 +
18166 +#undef LINK_SPEC
18167 +#define LINK_SPEC "%{version:-v} \
18168 + %{static:-Bstatic} \
18169 + %{shared:-shared} \
18170 + %{symbolic:-Bsymbolic} \
18171 + %{rdynamic:-export-dynamic} \
18172 + %{!dynamic-linker:-dynamic-linker /lib/ld-uClibc.so.0} \
18173 + %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}}"
18174 +
18175 +#define TARGET_OS_CPP_BUILTINS() LINUX_TARGET_OS_CPP_BUILTINS()
18176 +
18177 +/* This is how we tell the assembler that two symbols have the same value. */
18178 +#define ASM_OUTPUT_DEF(FILE, NAME1, NAME2) \
18179 + do \
18180 + { \
18181 + assemble_name (FILE, NAME1); \
18182 + fputs (" = ", FILE); \
18183 + assemble_name (FILE, NAME2); \
18184 + fputc ('\n', FILE); \
18185 + } \
18186 + while (0)
18187 +
18188 +
18189 +
18190 +#undef CC1_SPEC
18191 +#define CC1_SPEC "%{profile:-p}"
18192 +
18193 +/* Target CPU builtins. */
18194 +#define TARGET_CPU_CPP_BUILTINS() \
18195 + do \
18196 + { \
18197 + builtin_define ("__avr32__"); \
18198 + builtin_define ("__AVR32__"); \
18199 + builtin_define ("__AVR32_LINUX__"); \
18200 + builtin_define (avr32_part->macro); \
18201 + builtin_define (avr32_arch->macro); \
18202 + if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A) \
18203 + builtin_define ("__AVR32_AVR32A__"); \
18204 + else \
18205 + builtin_define ("__AVR32_AVR32B__"); \
18206 + if (TARGET_UNALIGNED_WORD) \
18207 + builtin_define ("__AVR32_HAS_UNALIGNED_WORD__"); \
18208 + if (TARGET_SIMD) \
18209 + builtin_define ("__AVR32_HAS_SIMD__"); \
18210 + if (TARGET_DSP) \
18211 + builtin_define ("__AVR32_HAS_DSP__"); \
18212 + if (TARGET_RMW) \
18213 + builtin_define ("__AVR32_HAS_RMW__"); \
18214 + if (TARGET_BRANCH_PRED) \
18215 + builtin_define ("__AVR32_HAS_BRANCH_PRED__"); \
18216 + if (flag_pic) \
18217 + { \
18218 + builtin_define ("__PIC__"); \
18219 + builtin_define ("__pic__"); \
18220 + } \
18221 + } \
18222 + while (0)
18223 +
18224 +
18225 +
18226 +/* Call the function profiler with a given profile label. */
18227 +#undef FUNCTION_PROFILER
18228 +#define FUNCTION_PROFILER(STREAM, LABELNO) \
18229 + do \
18230 + { \
18231 + fprintf (STREAM, "\tmov\tlr, lo(mcount)\n\torh\tlr, hi(mcount)\n"); \
18232 + fprintf (STREAM, "\ticall lr\n"); \
18233 + } \
18234 + while (0)
18235 +
18236 +#define NO_PROFILE_COUNTERS 1
18237 +
18238 +/* For dynamic libraries to work */
18239 +/* #define PLT_REG_CALL_CLOBBERED 1 */
18240 +#define AVR32_ALWAYS_PIC 1
18241 +
18242 +/* uclibc does not implement sinf, cosf etc. */
18243 +#undef TARGET_C99_FUNCTIONS
18244 +#define TARGET_C99_FUNCTIONS 0
18245 +
18246 +#define LINK_GCC_C_SEQUENCE_SPEC \
18247 + "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}"
18248 diff -Nur gcc-4.1.2/gcc/config/avr32/predicates.md gcc-4.1.2-owrt/gcc/config/avr32/predicates.md
18249 --- gcc-4.1.2/gcc/config/avr32/predicates.md 1970-01-01 01:00:00.000000000 +0100
18250 +++ gcc-4.1.2-owrt/gcc/config/avr32/predicates.md 2007-05-24 12:03:28.000000000 +0200
18251 @@ -0,0 +1,303 @@
18252 +;; AVR32 predicates file.
18253 +;; Copyright 2003-2006 Atmel Corporation.
18254 +;;
18255 +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
18256 +;;
18257 +;; This file is part of GCC.
18258 +;;
18259 +;; This program is free software; you can redistribute it and/or modify
18260 +;; it under the terms of the GNU General Public License as published by
18261 +;; the Free Software Foundation; either version 2 of the License, or
18262 +;; (at your option) any later version.
18263 +;;
18264 +;; This program is distributed in the hope that it will be useful,
18265 +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
18266 +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18267 +;; GNU General Public License for more details.
18268 +;;
18269 +;; You should have received a copy of the GNU General Public License
18270 +;; along with this program; if not, write to the Free Software
18271 +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18272 +
18273 +
18274 +;; True if the operand is a memory reference which contains an
18275 +;; address consisting of a single pointer register
18276 +(define_predicate "avr32_indirect_register_operand"
18277 + (and (match_code "mem")
18278 + (match_test "register_operand(XEXP(op, 0), SImode)")))
18279 +
18280 +
18281 +
18282 +;; Address expression with a base pointer offset with
18283 +;; a register displacement
18284 +(define_predicate "avr32_indexed_memory_operand"
18285 + (and (match_code "mem")
18286 + (match_test "GET_CODE(XEXP(op, 0)) == PLUS"))
18287 + {
18288 +
18289 + rtx op0 = XEXP(XEXP(op, 0), 0);
18290 + rtx op1 = XEXP(XEXP(op, 0), 1);
18291 +
18292 + return ((avr32_address_register_rtx_p (op0, 0)
18293 + && avr32_legitimate_index_p (GET_MODE(op), op1, 0))
18294 + || (avr32_address_register_rtx_p (op1, 0)
18295 + && avr32_legitimate_index_p (GET_MODE(op), op0, 0)));
18296 +
18297 + })
18298 +
18299 +;; Operand suitable for the ld.sb instruction
18300 +(define_predicate "load_sb_memory_operand"
18301 + (ior (match_operand 0 "avr32_indirect_register_operand")
18302 + (match_operand 0 "avr32_indexed_memory_operand")))
18303 +
18304 +
18305 +;; Operand suitable as operand to insns sign extending QI values
18306 +(define_predicate "extendqi_operand"
18307 + (ior (match_operand 0 "load_sb_memory_operand")
18308 + (match_operand 0 "register_operand")))
18309 +
18310 +(define_predicate "post_inc_memory_operand"
18311 + (and (match_code "mem")
18312 + (match_test "(GET_CODE(XEXP(op, 0)) == POST_INC)
18313 + && REG_P(XEXP(XEXP(op, 0), 0))")))
18314 +
18315 +;; Operand suitable for loading TImode values
18316 +(define_predicate "loadti_operand"
18317 + (ior (ior (match_operand 0 "register_operand")
18318 + (match_operand 0 "avr32_indirect_register_operand"))
18319 + (match_operand 0 "post_inc_memory_operand")))
18320 +
18321 +;; Operand suitable for add instructions
18322 +(define_predicate "avr32_add_operand"
18323 + (ior (match_operand 0 "register_operand")
18324 + (and (match_operand 0 "immediate_operand")
18325 + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'I', \"Is21\")"))))
18326 +
18327 +;; Operand is a power of two immediate
18328 +(define_predicate "power_of_two_operand"
18329 + (match_code "const_int")
18330 +{
18331 + HOST_WIDE_INT value = INTVAL (op);
18332 +
18333 + return value != 0 && (value & (value - 1)) == 0;
18334 +})
18335 +
18336 +;; Operand is a multiple of 8 immediate
18337 +(define_predicate "multiple_of_8_operand"
18338 + (match_code "const_int")
18339 +{
18340 + HOST_WIDE_INT value = INTVAL (op);
18341 +
18342 + return (value & 0x7) == 0 ;
18343 +})
18344 +
18345 +;; Operand is a multiple of 16 immediate
18346 +(define_predicate "multiple_of_16_operand"
18347 + (match_code "const_int")
18348 +{
18349 + HOST_WIDE_INT value = INTVAL (op);
18350 +
18351 + return (value & 0xf) == 0 ;
18352 +})
18353 +
18354 +;; Operand is a mask used for masking away upper bits of a reg
18355 +(define_predicate "avr32_mask_upper_bits_operand"
18356 + (match_code "const_int")
18357 +{
18358 + HOST_WIDE_INT value = INTVAL (op) + 1;
18359 +
18360 + return value != 1 && value != 0 && (value & (value - 1)) == 0;
18361 +})
18362 +
18363 +
18364 +;; Operand suitable for mul instructions
18365 +(define_predicate "avr32_mul_operand"
18366 + (ior (match_operand 0 "register_operand")
18367 + (and (match_operand 0 "immediate_operand")
18368 + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")"))))
18369 +
18370 +;; True for logical binary operators.
18371 +(define_predicate "logical_binary_operator"
18372 + (match_code "ior,xor,and"))
18373 +
18374 +;; True for logical shift operators
18375 +(define_predicate "logical_shift_operator"
18376 + (match_code "ashift,lshiftrt"))
18377 +
18378 +;; True for shift operand for logical and, or and eor insns
18379 +(define_predicate "avr32_logical_shift_operand"
18380 + (and (match_code "ashift,lshiftrt")
18381 + (ior (and (match_test "GET_CODE(XEXP(op, 1)) == CONST_INT")
18382 + (match_test "register_operand(XEXP(op, 0), GET_MODE(XEXP(op, 0)))"))
18383 + (and (match_test "GET_CODE(XEXP(op, 0)) == CONST_INT")
18384 + (match_test "register_operand(XEXP(op, 1), GET_MODE(XEXP(op, 1)))"))))
18385 + {
18386 + return 1;
18387 + }
18388 + )
18389 +
18390 +
18391 +;; Predicate for second operand to and, ior and xor insn patterns
18392 +(define_predicate "avr32_logical_insn_operand"
18393 + (ior (match_operand 0 "register_operand")
18394 + (match_operand 0 "avr32_logical_shift_operand"))
18395 + {
18396 + return 1;
18397 + }
18398 +)
18399 +
18400 +
18401 +;; True for avr32 comparison operators
18402 +(define_predicate "avr32_comparison_operator"
18403 + (ior (match_code "eq, ne, gt, ge, lt, le, gtu, geu, ltu, leu")
18404 + (and (match_code "unspec")
18405 + (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
18406 + || (XINT(op, 1) == UNSPEC_COND_PL)"))))
18407 +
18408 +;; True if this is a const_int with one bit set
18409 +(define_predicate "one_bit_set_operand"
18410 + (match_code "const_int")
18411 + {
18412 + int i;
18413 + int value;
18414 + int ones = 0;
18415 +
18416 + value = INTVAL(op);
18417 + for ( i = 0 ; i < 32; i++ ){
18418 + if ( value & ( 1 << i ) ){
18419 + ones++;
18420 + }
18421 + }
18422 +
18423 + return ( ones == 1 );
18424 + })
18425 +
18426 +
18427 +;; True if this is a const_int with one bit cleared
18428 +(define_predicate "one_bit_cleared_operand"
18429 + (match_code "const_int")
18430 + {
18431 + int i;
18432 + int value;
18433 + int zeroes = 0;
18434 +
18435 + value = INTVAL(op);
18436 + for ( i = 0 ; i < 32; i++ ){
18437 + if ( !(value & ( 1 << i )) ){
18438 + zeroes++;
18439 + }
18440 + }
18441 +
18442 + return ( zeroes == 1 );
18443 + })
18444 +
18445 +
18446 +;; True if this is a register or immediate operand
18447 +(define_predicate "register_immediate_operand"
18448 + (ior (match_operand 0 "register_operand")
18449 + (match_operand 0 "immediate_operand")))
18450 +
18451 +
18452 +;; True if this is an operand containing a label_ref
18453 +(define_predicate "avr32_label_ref_operand"
18454 + (and (match_code "mem")
18455 + (match_test "avr32_find_symbol(op)
18456 + && (GET_CODE(avr32_find_symbol(op)) == LABEL_REF)")))
18457 +
18458 +;; True if this is a valid symbol pointing to the constant pool
18459 +(define_predicate "avr32_const_pool_operand"
18460 + (and (match_code "symbol_ref")
18461 + (match_test "CONSTANT_POOL_ADDRESS_P(op)"))
18462 + {
18463 + return (flag_pic ? (!(symbol_mentioned_p (get_pool_constant (op))
18464 + || label_mentioned_p (get_pool_constant (op)))
18465 + || avr32_got_mentioned_p(get_pool_constant (op)))
18466 + : true);
18467 + }
18468 +)
18469 +
18470 +;; True if this is a memory reference to the constant or mini pool
18471 +(define_predicate "avr32_const_pool_ref_operand"
18472 + (ior (match_operand 0 "avr32_label_ref_operand")
18473 + (and (match_code "mem")
18474 + (match_test "avr32_const_pool_operand(XEXP(op,0), GET_MODE(XEXP(op,0)))"))))
18475 +
18476 +
18477 +
18478 +;; True if this is a k12 offset memory operand
18479 +(define_predicate "avr32_k12_memory_operand"
18480 + (and (match_code "mem")
18481 + (ior (match_test "REG_P(XEXP(op, 0))")
18482 + (match_test "GET_CODE(XEXP(op, 0)) == PLUS
18483 + && REG_P(XEXP(XEXP(op, 0), 0))
18484 + && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)
18485 + && (CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(XEXP(op, 0), 1)),
18486 + 'K', (mode == SImode) ? \"Ks14\" : ((mode == HImode) ? \"Ks13\" : \"Ks12\")))"))))
18487 +
18488 +;; True if this is a memory operand with an immediate displacement
18489 +(define_predicate "avr32_imm_disp_memory_operand"
18490 + (and (match_code "mem")
18491 + (match_test "GET_CODE(XEXP(op, 0)) == PLUS
18492 + && REG_P(XEXP(XEXP(op, 0), 0))
18493 + && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)")))
18494 +
18495 +;; True if this is a bswap operand
18496 +(define_predicate "avr32_bswap_operand"
18497 + (ior (match_operand 0 "avr32_k12_memory_operand")
18498 + (match_operand 0 "register_operand")))
18499 +
18500 +;; True if this is a valid coprocessor insn memory operand
18501 +(define_predicate "avr32_cop_memory_operand"
18502 + (and (match_operand 0 "memory_operand")
18503 + (not (match_test "GET_CODE(XEXP(op, 0)) == PLUS
18504 + && REG_P(XEXP(XEXP(op, 0), 0))
18505 + && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)
18506 + && !(CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(XEXP(op, 0), 1)), 'K', \"Ku10\"))"))))
18507 +
18508 +;; True if this is a valid source/destination operand
18509 +;; for moving values to/from a coprocessor
18510 +(define_predicate "avr32_cop_move_operand"
18511 + (ior (match_operand 0 "register_operand")
18512 + (match_operand 0 "avr32_cop_memory_operand")))
18513 +
18514 +
18515 +;; True if this is a valid extract byte offset for use in
18516 +;; load extracted index insns
18517 +(define_predicate "avr32_extract_shift_operand"
18518 + (and (match_operand 0 "const_int_operand")
18519 + (match_test "(INTVAL(op) == 0) || (INTVAL(op) == 8)
18520 + || (INTVAL(op) == 16) || (INTVAL(op) == 24)")))
18521 +
18522 +;; True if this is a floating-point register
18523 +(define_predicate "avr32_fp_register_operand"
18524 + (and (match_operand 0 "register_operand")
18525 + (match_test "REGNO_REG_CLASS(REGNO(op)) == FP_REGS")))
18526 +
18527 +;; True if this is a valid avr32 symbol operand
18528 +(define_predicate "avr32_symbol_operand"
18529 + (ior (match_code "label_ref, symbol_ref")
18530 + (and (match_code "const")
18531 + (match_test "avr32_find_symbol(op)"))))
18532 +
18533 +;; True if this is a valid operand for the lda.w and call pseudo insns
18534 +(define_predicate "avr32_address_operand"
18535 + (and (match_code "label_ref, symbol_ref")
18536 + (ior (match_test "TARGET_HAS_ASM_ADDR_PSEUDOS")
18537 + (match_test "flag_pic")) ))
18538 +
18539 +;; True if this is a avr32 call operand
18540 +(define_predicate "avr32_call_operand"
18541 + (ior (ior (match_operand 0 "register_operand")
18542 + (ior (match_operand 0 "avr32_const_pool_ref_operand")
18543 + (match_operand 0 "avr32_address_operand")))
18544 + (match_test "SYMBOL_REF_RCALL_FUNCTION_P(op)")))
18545 +
18546 +;; Return true for operators performing ALU operations
18547 +
18548 +(define_predicate "alu_operator"
18549 + (match_code "ior, xor, and, plus, minus, ashift, lshiftrt, ashiftrt"))
18550 +
18551 +(define_predicate "avr32_add_shift_immediate_operand"
18552 + (and (match_operand 0 "immediate_operand")
18553 + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ku02\")")))
18554 +
18555 diff -Nur gcc-4.1.2/gcc/config/avr32/simd.md gcc-4.1.2-owrt/gcc/config/avr32/simd.md
18556 --- gcc-4.1.2/gcc/config/avr32/simd.md 1970-01-01 01:00:00.000000000 +0100
18557 +++ gcc-4.1.2-owrt/gcc/config/avr32/simd.md 2007-05-24 12:03:28.000000000 +0200
18558 @@ -0,0 +1,145 @@
18559 +;; AVR32 machine description file for SIMD instructions.
18560 +;; Copyright 2003-2006 Atmel Corporation.
18561 +;;
18562 +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
18563 +;;
18564 +;; This file is part of GCC.
18565 +;;
18566 +;; This program is free software; you can redistribute it and/or modify
18567 +;; it under the terms of the GNU General Public License as published by
18568 +;; the Free Software Foundation; either version 2 of the License, or
18569 +;; (at your option) any later version.
18570 +;;
18571 +;; This program is distributed in the hope that it will be useful,
18572 +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
18573 +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18574 +;; GNU General Public License for more details.
18575 +;;
18576 +;; You should have received a copy of the GNU General Public License
18577 +;; along with this program; if not, write to the Free Software
18578 +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18579 +
18580 +;; -*- Mode: Scheme -*-
18581 +
18582 +
18583 +;; Vector modes
18584 +(define_mode_macro VECM [V2HI V4QI])
18585 +(define_mode_attr size [(V2HI "h") (V4QI "b")])
18586 +
18587 +(define_insn "add<mode>3"
18588 + [(set (match_operand:VECM 0 "register_operand" "=r")
18589 + (plus:VECM (match_operand:VECM 1 "register_operand" "r")
18590 + (match_operand:VECM 2 "register_operand" "r")))]
18591 + "TARGET_SIMD"
18592 + "padd.<size>\t%0, %1, %2"
18593 + [(set_attr "length" "4")
18594 + (set_attr "type" "alu")])
18595 +
18596 +
18597 +(define_insn "sub<mode>3"
18598 + [(set (match_operand:VECM 0 "register_operand" "=r")
18599 + (minus:VECM (match_operand:VECM 1 "register_operand" "r")
18600 + (match_operand:VECM 2 "register_operand" "r")))]
18601 + "TARGET_SIMD"
18602 + "psub.<size>\t%0, %1, %2"
18603 + [(set_attr "length" "4")
18604 + (set_attr "type" "alu")])
18605 +
18606 +
18607 +(define_insn "abs<mode>2"
18608 + [(set (match_operand:VECM 0 "register_operand" "=r")
18609 + (abs:VECM (match_operand:VECM 1 "register_operand" "r")))]
18610 + "TARGET_SIMD"
18611 + "pabs.s<size>\t%0, %1"
18612 + [(set_attr "length" "4")
18613 + (set_attr "type" "alu")])
18614 +
18615 +(define_insn "ashl<mode>3"
18616 + [(set (match_operand:VECM 0 "register_operand" "=r")
18617 + (ashift:VECM (match_operand:VECM 1 "register_operand" "r")
18618 + (match_operand:SI 2 "immediate_operand" "Ku04")))]
18619 + "TARGET_SIMD"
18620 + "plsl.<size>\t%0, %1, %2"
18621 + [(set_attr "length" "4")
18622 + (set_attr "type" "alu")])
18623 +
18624 +(define_insn "ashr<mode>3"
18625 + [(set (match_operand:VECM 0 "register_operand" "=r")
18626 + (ashiftrt:VECM (match_operand:VECM 1 "register_operand" "r")
18627 + (match_operand:SI 2 "immediate_operand" "Ku04")))]
18628 + "TARGET_SIMD"
18629 + "pasr.<size>\t%0, %1, %2"
18630 + [(set_attr "length" "4")
18631 + (set_attr "type" "alu")])
18632 +
18633 +(define_insn "lshr<mode>3"
18634 + [(set (match_operand:VECM 0 "register_operand" "=r")
18635 + (lshiftrt:VECM (match_operand:VECM 1 "register_operand" "r")
18636 + (match_operand:SI 2 "immediate_operand" "Ku04")))]
18637 + "TARGET_SIMD"
18638 + "plsr.<size>\t%0, %1, %2"
18639 + [(set_attr "length" "4")
18640 + (set_attr "type" "alu")])
18641 +
18642 +(define_insn "smaxv2hi3"
18643 + [(set (match_operand:V2HI 0 "register_operand" "=r")
18644 + (smax:V2HI (match_operand:V2HI 1 "register_operand" "r")
18645 + (match_operand:V2HI 2 "register_operand" "r")))]
18646 +
18647 + "TARGET_SIMD"
18648 + "pmax.sh\t%0, %1, %2"
18649 + [(set_attr "length" "4")
18650 + (set_attr "type" "alu")])
18651 +
18652 +(define_insn "sminv2hi3"
18653 + [(set (match_operand:V2HI 0 "register_operand" "=r")
18654 + (smin:V2HI (match_operand:V2HI 1 "register_operand" "r")
18655 + (match_operand:V2HI 2 "register_operand" "r")))]
18656 +
18657 + "TARGET_SIMD"
18658 + "pmin.sh\t%0, %1, %2"
18659 + [(set_attr "length" "4")
18660 + (set_attr "type" "alu")])
18661 +
18662 +(define_insn "umaxv4qi3"
18663 + [(set (match_operand:V4QI 0 "register_operand" "=r")
18664 + (umax:V4QI (match_operand:V4QI 1 "register_operand" "r")
18665 + (match_operand:V4QI 2 "register_operand" "r")))]
18666 +
18667 + "TARGET_SIMD"
18668 + "pmax.ub\t%0, %1, %2"
18669 + [(set_attr "length" "4")
18670 + (set_attr "type" "alu")])
18671 +
18672 +(define_insn "uminv4qi3"
18673 + [(set (match_operand:V4QI 0 "register_operand" "=r")
18674 + (umin:V4QI (match_operand:V4QI 1 "register_operand" "r")
18675 + (match_operand:V4QI 2 "register_operand" "r")))]
18676 +
18677 + "TARGET_SIMD"
18678 + "pmin.ub\t%0, %1, %2"
18679 + [(set_attr "length" "4")
18680 + (set_attr "type" "alu")])
18681 +
18682 +
18683 +(define_insn "addsubv2hi"
18684 + [(set (match_operand:V2HI 0 "register_operand" "=r")
18685 + (vec_concat:V2HI
18686 + (plus:HI (match_operand:HI 1 "register_operand" "r")
18687 + (match_operand:HI 2 "register_operand" "r"))
18688 + (minus:HI (match_dup 1) (match_dup 2))))]
18689 + "TARGET_SIMD"
18690 + "paddsub.h\t%0, %1:b, %2:b"
18691 + [(set_attr "length" "4")
18692 + (set_attr "type" "alu")])
18693 +
18694 +(define_insn "subaddv2hi"
18695 + [(set (match_operand:V2HI 0 "register_operand" "=r")
18696 + (vec_concat:V2HI
18697 + (minus:HI (match_operand:HI 1 "register_operand" "r")
18698 + (match_operand:HI 2 "register_operand" "r"))
18699 + (plus:HI (match_dup 1) (match_dup 2))))]
18700 + "TARGET_SIMD"
18701 + "psubadd.h\t%0, %1:b, %2:b"
18702 + [(set_attr "length" "4")
18703 + (set_attr "type" "alu")])
18704 diff -Nur gcc-4.1.2/gcc/config/avr32/t-avr32 gcc-4.1.2-owrt/gcc/config/avr32/t-avr32
18705 --- gcc-4.1.2/gcc/config/avr32/t-avr32 1970-01-01 01:00:00.000000000 +0100
18706 +++ gcc-4.1.2-owrt/gcc/config/avr32/t-avr32 2007-05-24 12:03:28.000000000 +0200
18707 @@ -0,0 +1,63 @@
18708 +
18709 +MD_INCLUDES= $(srcdir)/config/avr32/avr32.md \
18710 + $(srcdir)/config/avr32/fpcp.md \
18711 + $(srcdir)/config/avr32/simd.md \
18712 + $(srcdir)/config/avr32/predicates.md
18713 +
18714 +s-config s-conditions s-flags s-codes s-constants s-emit s-recog s-preds \
18715 + s-opinit s-extract s-peep s-attr s-attrtab s-output: $(MD_INCLUDES)
18716 +
18717 +# We want fine grained libraries, so use the new code
18718 +# to build the floating point emulation libraries.
18719 +FPBIT = fp-bit.c
18720 +DPBIT = dp-bit.c
18721 +
18722 +LIB1ASMSRC = avr32/lib1funcs.S
18723 +LIB1ASMFUNCS = _avr32_f64_mul _avr32_f64_addsub _avr32_f64_to_u32 _avr32_f64_to_s32 \
18724 + _avr32_f64_to_u64 _avr32_f64_to_s64 _avr32_u32_to_f64 _avr32_s32_to_f64 \
18725 + _avr32_f64_cmp_eq _avr32_f64_cmp_ge _avr32_f64_cmp_lt \
18726 + _avr32_f32_cmp_eq _avr32_f32_cmp_ge _avr32_f32_cmp_lt \
18727 + _avr32_f64_div _avr32_f32_div\
18728 + _avr32_f32_mul _avr32_s32_to_f32 _avr32_u32_to_f32 _avr32_f32_to_s32 \
18729 + _avr32_f32_to_u32 _avr32_f32_to_f64 _avr32_f64_to_f32
18730 +
18731 +LIB2FUNCS_EXTRA += $(srcdir)/config/avr32/lib2funcs.S
18732 +
18733 +MULTILIB_OPTIONS = march=ap/march=uc
18734 +MULTILIB_DIRNAMES = ap uc
18735 +MULTILIB_EXCEPTIONS =
18736 +MULTILIB_MATCHES = march?ap=mcpu?ap7000
18737 +MULTILIB_MATCHES += march?ap=mcpu?ap7010
18738 +MULTILIB_MATCHES += march?ap=mcpu?ap7020
18739 +MULTILIB_MATCHES += march?uc=mcpu?uc3a0256
18740 +MULTILIB_MATCHES += march?uc=mcpu?uc3a0512
18741 +MULTILIB_MATCHES += march?uc=mcpu?uc3a1128
18742 +MULTILIB_MATCHES += march?uc=mcpu?uc3a1256
18743 +MULTILIB_MATCHES += march?uc=mcpu?uc3a1512
18744 +MULTILIB_MATCHES += march?ap=mpart?ap7000
18745 +MULTILIB_MATCHES += march?ap=mpart?ap7010
18746 +MULTILIB_MATCHES += march?ap=mpart?ap7020
18747 +MULTILIB_MATCHES += march?uc=mpart?uc3a0256
18748 +MULTILIB_MATCHES += march?uc=mpart?uc3a0512
18749 +MULTILIB_MATCHES += march?uc=mpart?uc3a1128
18750 +MULTILIB_MATCHES += march?uc=mpart?uc3a1256
18751 +MULTILIB_MATCHES += march?uc=mpart?uc3a1512
18752 +
18753 +EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o crti.o crtn.o
18754 +
18755 +CRTSTUFF_T_CFLAGS = -mrelax
18756 +CRTSTUFF_T_CFLAGS_S = -mrelax -fPIC
18757 +TARGET_LIBGCC2_CFLAGS += -mrelax
18758 +
18759 +LIBGCC = stmp-multilib
18760 +INSTALL_LIBGCC = install-multilib
18761 +
18762 +fp-bit.c: $(srcdir)/config/fp-bit.c
18763 + echo '#define FLOAT' > fp-bit.c
18764 + cat $(srcdir)/config/fp-bit.c >> fp-bit.c
18765 +
18766 +dp-bit.c: $(srcdir)/config/fp-bit.c
18767 + cat $(srcdir)/config/fp-bit.c > dp-bit.c
18768 +
18769 +
18770 +
18771 diff -Nur gcc-4.1.2/gcc/config/avr32/t-elf gcc-4.1.2-owrt/gcc/config/avr32/t-elf
18772 --- gcc-4.1.2/gcc/config/avr32/t-elf 1970-01-01 01:00:00.000000000 +0100
18773 +++ gcc-4.1.2-owrt/gcc/config/avr32/t-elf 2007-05-24 12:03:28.000000000 +0200
18774 @@ -0,0 +1,16 @@
18775 +
18776 +# Assemble startup files.
18777 +$(T)crti.o: $(srcdir)/config/avr32/crti.asm $(GCC_PASSES)
18778 + $(GCC_FOR_TARGET) $(CRTSTUFF_CFLAGS) $(CRTSTUFF_T_CFLAGS) $(INCLUDES) \
18779 + -c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/avr32/crti.asm
18780 +
18781 +$(T)crtn.o: $(srcdir)/config/avr32/crtn.asm $(GCC_PASSES)
18782 + $(GCC_FOR_TARGET) $(CRTSTUFF_CFLAGS) $(CRTSTUFF_T_CFLAGS) $(INCLUDES) \
18783 + -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/avr32/crtn.asm
18784 +
18785 +
18786 +# Build the libraries for both hard and soft floating point
18787 +EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o crti.o crtn.o
18788 +
18789 +LIBGCC = stmp-multilib
18790 +INSTALL_LIBGCC = install-multilib
18791 diff -Nur gcc-4.1.2/gcc/config/avr32/uclinux-elf.h gcc-4.1.2-owrt/gcc/config/avr32/uclinux-elf.h
18792 --- gcc-4.1.2/gcc/config/avr32/uclinux-elf.h 1970-01-01 01:00:00.000000000 +0100
18793 +++ gcc-4.1.2-owrt/gcc/config/avr32/uclinux-elf.h 2007-05-24 12:03:28.000000000 +0200
18794 @@ -0,0 +1,20 @@
18795 +
18796 +/* Run-time Target Specification. */
18797 +#undef TARGET_VERSION
18798 +#define TARGET_VERSION fputs (" (AVR32 uClinux with ELF)", stderr)
18799 +
18800 +/* We don't want a .jcr section on uClinux. As if this makes a difference... */
18801 +#define TARGET_USE_JCR_SECTION 0
18802 +
18803 +/* Here we go. Drop the crtbegin/crtend stuff completely. */
18804 +#undef STARTFILE_SPEC
18805 +#define STARTFILE_SPEC \
18806 + "%{!shared: %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s}" \
18807 + " %{!p:%{profile:gcrt1.o%s}" \
18808 + " %{!profile:crt1.o%s}}}} crti.o%s"
18809 +
18810 +#undef ENDFILE_SPEC
18811 +#define ENDFILE_SPEC "crtn.o%s"
18812 +
18813 +#undef TARGET_DEFAULT
18814 +#define TARGET_DEFAULT (AVR32_FLAG_NO_INIT_GOT)
18815 diff -Nur gcc-4.1.2/gcc/config/host-linux.c gcc-4.1.2-owrt/gcc/config/host-linux.c
18816 --- gcc-4.1.2/gcc/config/host-linux.c 2005-08-01 19:43:33.000000000 +0200
18817 +++ gcc-4.1.2-owrt/gcc/config/host-linux.c 2007-05-24 12:03:28.000000000 +0200
18818 @@ -26,6 +26,9 @@
18819 #include "hosthooks.h"
18820 #include "hosthooks-def.h"
18821
18822 +#ifndef SSIZE_MAX
18823 +#define SSIZE_MAX LONG_MAX
18824 +#endif
18825
18826 /* Linux has a feature called exec-shield-randomize that perturbs the
18827 address of non-fixed mapped segments by a (relatively) small amount.
18828 diff -Nur gcc-4.1.2/gcc/config.gcc gcc-4.1.2-owrt/gcc/config.gcc
18829 --- gcc-4.1.2/gcc/config.gcc 2006-10-16 01:12:23.000000000 +0200
18830 +++ gcc-4.1.2-owrt/gcc/config.gcc 2007-05-24 12:03:28.000000000 +0200
18831 @@ -751,6 +751,24 @@
18832 tm_file="avr/avr.h dbxelf.h"
18833 use_fixproto=yes
18834 ;;
18835 +avr32*-*-linux*)
18836 + tm_file="dbxelf.h elfos.h linux.h avr32/linux-elf.h avr32/avr32.h "
18837 + tmake_file="t-linux avr32/t-avr32 avr32/t-elf"
18838 + extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o"
18839 + extra_modes=avr32/avr32-modes.def
18840 + gnu_ld=yes
18841 + ;;
18842 +avr32*-*-uclinux*)
18843 + tm_file="dbxelf.h elfos.h linux.h avr32/linux-elf.h avr32/uclinux-elf.h avr32/avr32.h"
18844 + tmake_file="t-linux avr32/t-avr32 avr32/t-elf"
18845 + extra_modes=avr32/avr32-modes.def
18846 + gnu_ld=yes
18847 + ;;
18848 +avr32-*-*)
18849 + tm_file="dbxelf.h elfos.h avr32/avr32.h avr32/avr32-elf.h"
18850 + tmake_file="avr32/t-avr32 avr32/t-elf"
18851 + extra_modes=avr32/avr32-modes.def
18852 + ;;
18853 bfin*-elf*)
18854 tm_file="${tm_file} dbxelf.h elfos.h bfin/elf.h"
18855 tmake_file=bfin/t-bfin-elf
18856 @@ -1614,6 +1632,9 @@
18857 pdp11-*-*)
18858 use_fixproto=yes
18859 ;;
18860 +avr-*-*)
18861 + use_fixproto=yes
18862 + ;;
18863 # port not yet contributed
18864 #powerpc-*-openbsd*)
18865 # tmake_file="${tmake_file} rs6000/t-fprules "
18866 @@ -2575,6 +2596,21 @@
18867 fi
18868 ;;
18869
18870 + avr32*-*-*)
18871 + supported_defaults="cpu"
18872 +
18873 + case "$with_cpu" in
18874 + "" \
18875 + | morgan | ap7000 )
18876 + # OK
18877 + ;;
18878 + *)
18879 + echo "Unknown cpu used in --with-cpu=$with_cpu" 1>&2
18880 + exit 1
18881 + ;;
18882 + esac
18883 + ;;
18884 +
18885 fr*-*-*linux*)
18886 supported_defaults=cpu
18887 case "$with_cpu" in
18888 diff -Nur gcc-4.1.2/gcc/doc/extend.texi gcc-4.1.2-owrt/gcc/doc/extend.texi
18889 --- gcc-4.1.2/gcc/doc/extend.texi 2007-01-28 20:26:29.000000000 +0100
18890 +++ gcc-4.1.2-owrt/gcc/doc/extend.texi 2007-05-24 12:03:28.000000000 +0200
18891 @@ -1887,7 +1887,7 @@
18892
18893 @item interrupt
18894 @cindex interrupt handler functions
18895 -Use this attribute on the ARM, AVR, C4x, CRX, M32C, M32R/D, MS1, and Xstormy16
18896 +Use this attribute on the ARM, AVR, AVR32, C4x, CRX, M32C, M32R/D, MS1, and Xstormy16
18897 ports to indicate that the specified function is an interrupt handler.
18898 The compiler will generate function entry and exit sequences suitable
18899 for use in an interrupt handler when this attribute is present.
18900 @@ -1906,6 +1906,15 @@
18901
18902 Permissible values for this parameter are: IRQ, FIQ, SWI, ABORT and UNDEF@.
18903
18904 +Note, for the AVR32, you can specify which banking scheme is used for
18905 +the interrupt mode this interrupt handler is used in, like this:
18906 +
18907 +@smallexample
18908 +void f () __attribute__ ((interrupt ("FULL")));
18909 +@end smallexample
18910 +
18911 +Permissible values for this parameter are: FULL, HALF, NONE and UNDEF.
18912 +
18913 @item interrupt_handler
18914 @cindex interrupt handler functions on the Blackfin, m68k, H8/300 and SH processors
18915 Use this attribute on the Blackfin, m68k, H8/300, H8/300H, H8S, and SH to
18916 @@ -5807,6 +5816,7 @@
18917 @menu
18918 * Alpha Built-in Functions::
18919 * ARM Built-in Functions::
18920 +* AVR32 Built-in Functions::
18921 * Blackfin Built-in Functions::
18922 * FR-V Built-in Functions::
18923 * X86 Built-in Functions::
18924 @@ -6045,6 +6055,54 @@
18925 long long __builtin_arm_wzero ()
18926 @end smallexample
18927
18928 +@node AVR32 Built-in Functions
18929 +@subsection AVR32 Built-in Functions
18930 +
18931 +
18932 +@smallexample
18933 +
18934 +int __builtin_sats (int /*Rd*/,int /*sa*/, int /*bn*/)
18935 +int __builtin_satu (int /*Rd*/,int /*sa*/, int /*bn*/)
18936 +int __builtin_satrnds (int /*Rd*/,int /*sa*/, int /*bn*/)
18937 +int __builtin_satrndu (int /*Rd*/,int /*sa*/, int /*bn*/)
18938 +short __builtin_mulsathh_h (short, short)
18939 +int __builtin_mulsathh_w (short, short)
18940 +short __builtin_mulsatrndhh_h (short, short)
18941 +int __builtin_mulsatrndwh_w (int, short)
18942 +int __builtin_mulsatwh_w (int, short)
18943 +int __builtin_macsathh_w (int, short, short)
18944 +short __builtin_satadd_h (short, short)
18945 +short __builtin_satsub_h (short, short)
18946 +int __builtin_satadd_w (int, int)
18947 +int __builtin_satsub_w (int, int)
18948 +long long __builtin_mulwh_d(int, short)
18949 +long long __builtin_mulnwh_d(int, short)
18950 +long long __builtin_macwh_d(long long, int, short)
18951 +long long __builtin_machh_d(long long, short, short)
18952 +
18953 +void __builtin_musfr(int);
18954 +int __builtin_mustr(void);
18955 +int __builtin_mfsr(int /*Status Register Address*/)
18956 +void __builtin_mtsr(int /*Status Register Address*/, int /*Value*/)
18957 +int __builtin_mfdr(int /*Debug Register Address*/)
18958 +void __builtin_mtdr(int /*Debug Register Address*/, int /*Value*/)
18959 +void __builtin_cache(void * /*Address*/, int /*Cache Operation*/)
18960 +void __builtin_sync(int /*Sync Operation*/)
18961 +void __builtin_tlbr(void)
18962 +void __builtin_tlbs(void)
18963 +void __builtin_tlbw(void)
18964 +void __builtin_breakpoint(void)
18965 +int __builtin_xchg(void * /*Address*/, int /*Value*/ )
18966 +short __builtin_bswap_16(short)
18967 +int __builtin_bswap_32(int)
18968 +void __builtin_cop(int/*cpnr*/, int/*crd*/, int/*crx*/, int/*cry*/, int/*op*/)
18969 +int __builtin_mvcr_w(int/*cpnr*/, int/*crs*/)
18970 +void __builtin_mvrc_w(int/*cpnr*/, int/*crd*/, int/*value*/)
18971 +long long __builtin_mvcr_d(int/*cpnr*/, int/*crs*/)
18972 +void __builtin_mvrc_d(int/*cpnr*/, int/*crd*/, long long/*value*/)
18973 +
18974 +@end smallexample
18975 +
18976 @node Blackfin Built-in Functions
18977 @subsection Blackfin Built-in Functions
18978
18979 diff -Nur gcc-4.1.2/gcc/doc/invoke.texi gcc-4.1.2-owrt/gcc/doc/invoke.texi
18980 --- gcc-4.1.2/gcc/doc/invoke.texi 2006-09-25 23:21:58.000000000 +0200
18981 +++ gcc-4.1.2-owrt/gcc/doc/invoke.texi 2007-05-24 12:03:28.000000000 +0200
18982 @@ -185,7 +185,7 @@
18983 -fno-default-inline -fvisibility-inlines-hidden @gol
18984 -Wabi -Wctor-dtor-privacy @gol
18985 -Wnon-virtual-dtor -Wreorder @gol
18986 --Weffc++ -Wno-deprecated -Wstrict-null-sentinel @gol
18987 +-Weffc++ -Wno-deprecated @gol
18988 -Wno-non-template-friend -Wold-style-cast @gol
18989 -Woverloaded-virtual -Wno-pmf-conversions @gol
18990 -Wsign-promo}
18991 @@ -569,6 +569,10 @@
18992 -mauto-incdec -minmax -mlong-calls -mshort @gol
18993 -msoft-reg-count=@var{count}}
18994
18995 +@emph{AVR32 Options}
18996 +@gccoptlist{-muse-rodata-section -mhard-float -msoft-float -mrelax @gol
18997 +-muse-oscall -mforce-double-align -mno-init-got -mcpu=@var{cpu}}
18998 +
18999 @emph{MCore Options}
19000 @gccoptlist{-mhardlit -mno-hardlit -mdiv -mno-div -mrelax-immediates @gol
19001 -mno-relax-immediates -mwide-bitfields -mno-wide-bitfields @gol
19002 @@ -1797,14 +1801,6 @@
19003 @opindex Wno-deprecated
19004 Do not warn about usage of deprecated features. @xref{Deprecated Features}.
19005
19006 -@item -Wstrict-null-sentinel @r{(C++ only)}
19007 -@opindex Wstrict-null-sentinel
19008 -Warn also about the use of an uncasted @code{NULL} as sentinel. When
19009 -compiling only with GCC this is a valid sentinel, as @code{NULL} is defined
19010 -to @code{__null}. Although it is a null pointer constant not a null pointer,
19011 -it is guaranteed to of the same size as a pointer. But this use is
19012 -not portable across different compilers.
19013 -
19014 @item -Wno-non-template-friend @r{(C++ only)}
19015 @opindex Wno-non-template-friend
19016 Disable warnings when non-templatized friend functions are declared
19017 @@ -2662,13 +2658,11 @@
19018 If you want to warn about code which uses the uninitialized value of the
19019 variable in its own initializer, use the @option{-Winit-self} option.
19020
19021 -These warnings occur for individual uninitialized or clobbered
19022 -elements of structure, union or array variables as well as for
19023 -variables which are uninitialized or clobbered as a whole. They do
19024 -not occur for variables or elements declared @code{volatile}. Because
19025 -these warnings depend on optimization, the exact variables or elements
19026 -for which there are warnings will depend on the precise optimization
19027 -options and version of GCC used.
19028 +These warnings occur only for variables that are candidates for
19029 +register allocation. Therefore, they do not occur for a variable that
19030 +is declared @code{volatile}, or whose address is taken, or whose size
19031 +is other than 1, 2, 4 or 8 bytes. Also, they do not occur for
19032 +structures, unions or arrays, even when they are in registers.
19033
19034 Note that there may be no warning about a variable that is used only
19035 to compute a value that itself is never used, because such
19036 @@ -5935,10 +5929,6 @@
19037 we always try to remove unnecessary ivs from the set during its
19038 optimization when a new iv is added to the set.
19039
19040 -@item scev-max-expr-size
19041 -Bound on size of expressions used in the scalar evolutions analyzer.
19042 -Large expressions slow the analyzer.
19043 -
19044 @item vect-max-version-checks
19045 The maximum number of runtime checks that can be performed when doing
19046 loop versioning in the vectorizer. See option ftree-vect-loop-version
19047 @@ -7115,7 +7105,7 @@
19048 * ARC Options::
19049 * ARM Options::
19050 * AVR Options::
19051 -* Blackfin Options::
19052 +* AVR32 Options::
19053 * CRIS Options::
19054 * CRX Options::
19055 * Darwin Options::
19056 @@ -7578,81 +7568,55 @@
19057 size.
19058 @end table
19059
19060 -@node Blackfin Options
19061 -@subsection Blackfin Options
19062 -@cindex Blackfin Options
19063 +@node AVR32 Options
19064 +@subsection AVR32 Options
19065 +@cindex AVR32 Options
19066
19067 -@table @gcctabopt
19068 -@item -momit-leaf-frame-pointer
19069 -@opindex momit-leaf-frame-pointer
19070 -Don't keep the frame pointer in a register for leaf functions. This
19071 -avoids the instructions to save, set up and restore frame pointers and
19072 -makes an extra register available in leaf functions. The option
19073 -@option{-fomit-frame-pointer} removes the frame pointer for all functions
19074 -which might make debugging harder.
19075 +These options are defined for AVR32 implementations:
19076
19077 -@item -mspecld-anomaly
19078 -@opindex mspecld-anomaly
19079 -When enabled, the compiler will ensure that the generated code does not
19080 -contain speculative loads after jump instructions. This option is enabled
19081 -by default.
19082 -
19083 -@item -mno-specld-anomaly
19084 -@opindex mno-specld-anomaly
19085 -Don't generate extra code to prevent speculative loads from occurring.
19086 -
19087 -@item -mcsync-anomaly
19088 -@opindex mcsync-anomaly
19089 -When enabled, the compiler will ensure that the generated code does not
19090 -contain CSYNC or SSYNC instructions too soon after conditional branches.
19091 -This option is enabled by default.
19092 -
19093 -@item -mno-csync-anomaly
19094 -@opindex mno-csync-anomaly
19095 -Don't generate extra code to prevent CSYNC or SSYNC instructions from
19096 -occurring too soon after a conditional branch.
19097 -
19098 -@item -mlow-64k
19099 -@opindex mlow-64k
19100 -When enabled, the compiler is free to take advantage of the knowledge that
19101 -the entire program fits into the low 64k of memory.
19102 -
19103 -@item -mno-low-64k
19104 -@opindex mno-low-64k
19105 -Assume that the program is arbitrarily large. This is the default.
19106 +@table @gcctabopt
19107 +@item -muse-rodata-section
19108 +@opindex muse-rodata-section
19109 +Use section @samp{.rodata} for read-only data instead of @samp{.text}.
19110
19111 -@item -mid-shared-library
19112 -@opindex mid-shared-library
19113 -Generate code that supports shared libraries via the library ID method.
19114 -This allows for execute in place and shared libraries in an environment
19115 -without virtual memory management. This option implies @option{-fPIC}.
19116 +@item -mhard-float
19117 +@opindex mhard-float
19118 +Use floating-point coprocessor instructions.
19119
19120 -@item -mno-id-shared-library
19121 -@opindex mno-id-shared-library
19122 -Generate code that doesn't assume ID based shared libraries are being used.
19123 -This is the default.
19124 +@item -msoft-float
19125 +@opindex msoft-float
19126 +Use software floating-point library.
19127
19128 -@item -mshared-library-id=n
19129 -@opindex mshared-library-id
19130 -Specified the identification number of the ID based shared library being
19131 -compiled. Specifying a value of 0 will generate more compact code, specifying
19132 -other values will force the allocation of that number to the current
19133 -library but is no more space or time efficient than omitting this option.
19134 +@item -mrelax
19135 +@opindex mrelax
19136 +Enable linker relaxation. This means that when the addresses of symbols
19137 +are known at link time, the linker can optimize @samp{icall} and @samp{mcall}
19138 +instructions into a @samp{rcall} instruction if possible. Loading the address
19139 +of a symbol can also be optimized.
19140 +
19141 +@item -muse-oscall
19142 +@opindex muse-oscall
19143 +When using gcc as a frontend for linking this switch forces the use of
19144 +@samp{fake} system calls in the newlib c-library. These fake system
19145 +calls are handled by some AVR32 simulators which redirects these calls
19146 +to the OS in which the simulator is running. This is practical for
19147 +being able to perform file I/O when running programs in a simulator.
19148 +
19149 +@item -mforce-double-align
19150 +@opindex mforce-double-align
19151 +Force double-word alignment for double-word memory accesses.
19152 +
19153 +@item -mno-init-got
19154 +@opindex mno-init-got
19155 +Do not initialize the GOT register before using it when compiling PIC
19156 +code.
19157
19158 -@item -mlong-calls
19159 -@itemx -mno-long-calls
19160 -@opindex mlong-calls
19161 -@opindex mno-long-calls
19162 -Tells the compiler to perform function calls by first loading the
19163 -address of the function into a register and then performing a subroutine
19164 -call on this register. This switch is needed if the target function
19165 -will lie outside of the 24 bit addressing range of the offset based
19166 -version of subroutine call instruction.
19167 +@item -mcpu=@var{cpu-type}
19168 +@opindex mcpu
19169 +Generate code for the specified cpu. Permissible names are: @samp{morgan},
19170 +@samp{ap7000} and @samp{default}. @samp{default} is a dummy cpu which
19171 +allows all avr32 instructions.
19172
19173 -This feature is not enabled by default. Specifying
19174 -@option{-mno-long-calls} will restore the default behavior. Note these
19175 -switches have no effect on how the compiler generates code to handle
19176 -function calls via function pointers.
19177 @end table
19178
19179 @node CRIS Options
19180 @@ -11341,6 +11305,7 @@
19181 Application Binary Interface, PowerPC processor supplement. This is the
19182 default unless you configured GCC using @samp{powerpc-*-eabiaix}.
19183
19184 +
19185 @item -mcall-sysv-eabi
19186 @opindex mcall-sysv-eabi
19187 Specify both @option{-mcall-sysv} and @option{-meabi} options.
19188 diff -Nur gcc-4.1.2/gcc/doc/md.texi gcc-4.1.2-owrt/gcc/doc/md.texi
19189 --- gcc-4.1.2/gcc/doc/md.texi 2005-12-16 14:11:58.000000000 +0100
19190 +++ gcc-4.1.2-owrt/gcc/doc/md.texi 2007-05-24 12:03:28.000000000 +0200
19191 @@ -1686,6 +1686,59 @@
19192 A memory reference suitable for the ARMv4 ldrsb instruction.
19193 @end table
19194
19195 +@item AVR32 family---@file{avr32.h}
19196 +@table @code
19197 +@item f
19198 +Floating-point registers (f0 to f15)
19199 +
19200 +@item Ku@var{bits}
19201 +Unsigned constant representable with @var{bits} number of bits (must be
19202 +two digits). For example, an unsigned 8-bit constant is written as @samp{Ku08}.
19203 +
19204 +@item Ks@var{bits}
19205 +Signed constant representable with @var{bits} number of bits (must be
19206 +two digits). For example, a signed 12-bit constant is written as @samp{Ks12}.
19207 +
19208 +@item Is@var{bits}
19209 +The negated range of a signed constant representable with @var{bits}
19210 +number of bits. The same as @samp{Ks@var{bits}} with a negated range.
19211 +This means that the constant must be in the range @math{-2^{bits-1}+1} to @math{2^{bits-1}}
19212 +
19213 +@item G
19214 +A single/double precision floating-point immediate or 64-bit integer
19215 +immediate where the least and most significant words both can be
19216 +loaded with a move instruction. That is, the integer forms of the
19217 +values in the least and most significant words are both in the range
19218 +@math{-2^{20}} to @math{2^{20}-1}.
19219 +
19220 +@item RKs@var{bits}
19221 +A memory reference where the address consists of a base register
19222 +plus a signed immediate displacement with range given by @samp{Ks@var{bits}}
19223 +which has the same format as for the signed immediate integer constraint
19224 +given above.
19225 +
19226 +@item RKu@var{bits}
19227 +A memory reference where the address consists of a base register
19228 +plus an unsigned immediate displacement with range given by @samp{Ku@var{bits}}
19229 +which has the same format as for the unsigned immediate integer constraint
19230 +given above.
19231 +
19232 +@item S
19233 +A memory reference with an immediate or register offset
19234 +
19235 +@item T
19236 +A memory reference to a constant pool entry
19237 +
19238 +@item W
19239 +A valid operand for use in the @samp{lda.w} instruction macro when
19240 +relaxing is enabled
19241 +
19242 +@item Z
19243 +A memory reference valid for coprocessor memory instructions
19244 +
19245 +@end table
19246 +
19247 +
19248 @item AVR family---@file{avr.h}
19249 @table @code
19250 @item l
19251 @@ -2132,102 +2185,6 @@
19252
19253 @end table
19254
19255 -@item Blackfin family---@file{bfin.h}
19256 -@table @code
19257 -@item a
19258 -P register
19259 -
19260 -@item d
19261 -D register
19262 -
19263 -@item z
19264 -A call clobbered P register.
19265 -
19266 -@item D
19267 -Even-numbered D register
19268 -
19269 -@item W
19270 -Odd-numbered D register
19271 -
19272 -@item e
19273 -Accumulator register.
19274 -
19275 -@item A
19276 -Even-numbered accumulator register.
19277 -
19278 -@item B
19279 -Odd-numbered accumulator register.
19280 -
19281 -@item b
19282 -I register
19283 -
19284 -@item B
19285 -B register
19286 -
19287 -@item f
19288 -M register
19289 -
19290 -@item c
19291 -Registers used for circular buffering, i.e. I, B, or L registers.
19292 -
19293 -@item C
19294 -The CC register.
19295 -
19296 -@item x
19297 -Any D, P, B, M, I or L register.
19298 -
19299 -@item y
19300 -Additional registers typically used only in prologues and epilogues: RETS,
19301 -RETN, RETI, RETX, RETE, ASTAT, SEQSTAT and USP.
19302 -
19303 -@item w
19304 -Any register except accumulators or CC.
19305 -
19306 -@item Ksh
19307 -Signed 16 bit integer (in the range -32768 to 32767)
19308 -
19309 -@item Kuh
19310 -Unsigned 16 bit integer (in the range 0 to 65535)
19311 -
19312 -@item Ks7
19313 -Signed 7 bit integer (in the range -64 to 63)
19314 -
19315 -@item Ku7
19316 -Unsigned 7 bit integer (in the range 0 to 127)
19317 -
19318 -@item Ku5
19319 -Unsigned 5 bit integer (in the range 0 to 31)
19320 -
19321 -@item Ks4
19322 -Signed 4 bit integer (in the range -8 to 7)
19323 -
19324 -@item Ks3
19325 -Signed 3 bit integer (in the range -3 to 4)
19326 -
19327 -@item Ku3
19328 -Unsigned 3 bit integer (in the range 0 to 7)
19329 -
19330 -@item P@var{n}
19331 -Constant @var{n}, where @var{n} is a single-digit constant in the range 0 to 4.
19332 -
19333 -@item M1
19334 -Constant 255.
19335 -
19336 -@item M2
19337 -Constant 65535.
19338 -
19339 -@item J
19340 -An integer constant with exactly a single bit set.
19341 -
19342 -@item L
19343 -An integer constant with all bits set except exactly one.
19344 -
19345 -@item H
19346 -
19347 -@item Q
19348 -Any SYMBOL_REF.
19349 -@end table
19350 -
19351 @item M32C---@file{m32c.c}
19352
19353 @item Rsp
19354 diff -Nur gcc-4.1.2/gcc/expr.c gcc-4.1.2-owrt/gcc/expr.c
19355 --- gcc-4.1.2/gcc/expr.c 2006-11-02 18:18:52.000000000 +0100
19356 +++ gcc-4.1.2-owrt/gcc/expr.c 2007-05-24 12:03:28.000000000 +0200
19357 @@ -3401,18 +3401,19 @@
19358 }
19359 else
19360 {
19361 + emit_move_insn (stack_pointer_rtx,
19362 + expand_binop (Pmode,
19363 #ifdef STACK_GROWS_DOWNWARD
19364 - /* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC. */
19365 - dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
19366 - GEN_INT (-(HOST_WIDE_INT) rounded_size));
19367 + sub_optab,
19368 #else
19369 - /* ??? This seems wrong if STACK_PUSH_CODE == POST_INC. */
19370 - dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
19371 - GEN_INT (rounded_size));
19372 + add_optab,
19373 #endif
19374 - dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr);
19375 + stack_pointer_rtx,
19376 + GEN_INT (rounded_size),
19377 + NULL_RTX, 0, OPTAB_LIB_WIDEN));
19378 + dest_addr = stack_pointer_rtx;
19379 }
19380 -
19381 +
19382 dest = gen_rtx_MEM (mode, dest_addr);
19383
19384 if (type != 0)
19385 diff -Nur gcc-4.1.2/gcc/genemit.c gcc-4.1.2-owrt/gcc/genemit.c
19386 --- gcc-4.1.2/gcc/genemit.c 2005-06-25 04:02:01.000000000 +0200
19387 +++ gcc-4.1.2-owrt/gcc/genemit.c 2007-05-24 12:03:28.000000000 +0200
19388 @@ -122,6 +122,24 @@
19389 }
19390 \f
19391 static void
19392 +gen_vararg_prologue(int operands)
19393 +{
19394 + int i;
19395 +
19396 + if (operands > 1)
19397 + {
19398 + for (i = 1; i < operands; i++)
19399 + printf(" rtx operand%d ATTRIBUTE_UNUSED;\n", i);
19400 +
19401 + printf(" va_list args;\n\n");
19402 + printf(" va_start(args, operand0);\n");
19403 + for (i = 1; i < operands; i++)
19404 + printf(" operand%d = va_arg(args, rtx);\n", i);
19405 + printf(" va_end(args);\n\n");
19406 + }
19407 +}
19408 +
19409 +static void
19410 print_code (RTX_CODE code)
19411 {
19412 const char *p1;
19413 @@ -406,18 +424,16 @@
19414 fatal ("match_dup operand number has no match_operand");
19415
19416 /* Output the function name and argument declarations. */
19417 - printf ("rtx\ngen_%s (", XSTR (insn, 0));
19418 + printf ("rtx\ngen_%s ", XSTR (insn, 0));
19419 +
19420 if (operands)
19421 - for (i = 0; i < operands; i++)
19422 - if (i)
19423 - printf (",\n\trtx operand%d ATTRIBUTE_UNUSED", i);
19424 - else
19425 - printf ("rtx operand%d ATTRIBUTE_UNUSED", i);
19426 + printf("(rtx operand0 ATTRIBUTE_UNUSED, ...)\n");
19427 else
19428 - printf ("void");
19429 - printf (")\n");
19430 + printf("(void)\n");
19431 printf ("{\n");
19432
19433 + gen_vararg_prologue(operands);
19434 +
19435 /* Output code to construct and return the rtl for the instruction body. */
19436
19437 if (XVECLEN (insn, 1) == 1)
19438 @@ -457,16 +473,12 @@
19439 operands = max_operand_vec (expand, 1);
19440
19441 /* Output the function name and argument declarations. */
19442 - printf ("rtx\ngen_%s (", XSTR (expand, 0));
19443 + printf ("rtx\ngen_%s ", XSTR (expand, 0));
19444 if (operands)
19445 - for (i = 0; i < operands; i++)
19446 - if (i)
19447 - printf (",\n\trtx operand%d", i);
19448 - else
19449 - printf ("rtx operand%d", i);
19450 + printf("(rtx operand0 ATTRIBUTE_UNUSED, ...)\n");
19451 else
19452 - printf ("void");
19453 - printf (")\n");
19454 + printf("(void)\n");
19455 +
19456 printf ("{\n");
19457
19458 /* If we don't have any C code to write, only one insn is being written,
19459 @@ -476,6 +488,8 @@
19460 && operands > max_dup_opno
19461 && XVECLEN (expand, 1) == 1)
19462 {
19463 + gen_vararg_prologue(operands);
19464 +
19465 printf (" return ");
19466 gen_exp (XVECEXP (expand, 1, 0), DEFINE_EXPAND, NULL);
19467 printf (";\n}\n\n");
19468 @@ -489,6 +503,7 @@
19469 for (; i <= max_scratch_opno; i++)
19470 printf (" rtx operand%d ATTRIBUTE_UNUSED;\n", i);
19471 printf (" rtx _val = 0;\n");
19472 + gen_vararg_prologue(operands);
19473 printf (" start_sequence ();\n");
19474
19475 /* The fourth operand of DEFINE_EXPAND is some code to be executed
19476 diff -Nur gcc-4.1.2/gcc/genflags.c gcc-4.1.2-owrt/gcc/genflags.c
19477 --- gcc-4.1.2/gcc/genflags.c 2005-06-25 04:02:01.000000000 +0200
19478 +++ gcc-4.1.2-owrt/gcc/genflags.c 2007-05-24 12:03:28.000000000 +0200
19479 @@ -128,7 +128,6 @@
19480 gen_proto (rtx insn)
19481 {
19482 int num = num_operands (insn);
19483 - int i;
19484 const char *name = XSTR (insn, 0);
19485 int truth = maybe_eval_c_test (XSTR (insn, 2));
19486
19487 @@ -159,12 +158,7 @@
19488 if (num == 0)
19489 fputs ("void", stdout);
19490 else
19491 - {
19492 - for (i = 1; i < num; i++)
19493 - fputs ("rtx, ", stdout);
19494 -
19495 - fputs ("rtx", stdout);
19496 - }
19497 + fputs("rtx, ...", stdout);
19498
19499 puts (");");
19500
19501 @@ -174,12 +168,7 @@
19502 {
19503 printf ("static inline rtx\ngen_%s", name);
19504 if (num > 0)
19505 - {
19506 - putchar ('(');
19507 - for (i = 0; i < num-1; i++)
19508 - printf ("rtx ARG_UNUSED (%c), ", 'a' + i);
19509 - printf ("rtx ARG_UNUSED (%c))\n", 'a' + i);
19510 - }
19511 + puts("(rtx ARG_UNUSED(a), ...)");
19512 else
19513 puts ("(void)");
19514 puts ("{\n return 0;\n}");
19515 diff -Nur gcc-4.1.2/gcc/genoutput.c gcc-4.1.2-owrt/gcc/genoutput.c
19516 --- gcc-4.1.2/gcc/genoutput.c 2005-06-25 04:02:01.000000000 +0200
19517 +++ gcc-4.1.2-owrt/gcc/genoutput.c 2007-05-24 12:03:28.000000000 +0200
19518 @@ -383,7 +383,7 @@
19519 }
19520
19521 if (d->name && d->name[0] != '*')
19522 - printf (" (insn_gen_fn) gen_%s,\n", d->name);
19523 + printf (" gen_%s,\n", d->name);
19524 else
19525 printf (" 0,\n");
19526
19527 diff -Nur gcc-4.1.2/gcc/longlong.h gcc-4.1.2-owrt/gcc/longlong.h
19528 --- gcc-4.1.2/gcc/longlong.h 2005-12-06 11:02:57.000000000 +0100
19529 +++ gcc-4.1.2-owrt/gcc/longlong.h 2007-05-24 12:03:28.000000000 +0200
19530 @@ -227,6 +227,39 @@
19531 #define UDIV_TIME 100
19532 #endif /* __arm__ */
19533
19534 +#if defined (__avr32__) && W_TYPE_SIZE == 32
19535 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
19536 + __asm__ ("add\t%1, %4, %5\n\tadc\t%0, %2, %3" \
19537 + : "=r" ((USItype) (sh)), \
19538 + "=&r" ((USItype) (sl)) \
19539 + : "r" ((USItype) (ah)), \
19540 + "r" ((USItype) (bh)), \
19541 + "r" ((USItype) (al)), \
19542 + "r" ((USItype) (bl)) __CLOBBER_CC)
19543 +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
19544 + __asm__ ("sub\t%1, %4, %5\n\tsbc\t%0, %2, %3" \
19545 + : "=r" ((USItype) (sh)), \
19546 + "=&r" ((USItype) (sl)) \
19547 + : "r" ((USItype) (ah)), \
19548 + "r" ((USItype) (bh)), \
19549 + "r" ((USItype) (al)), \
19550 + "r" ((USItype) (bl)) __CLOBBER_CC)
19551 +
19552 +#define __umulsidi3(a,b) ((UDItype)(a) * (UDItype)(b))
19553 +
19554 +#define umul_ppmm(w1, w0, u, v) \
19555 +{ \
19556 + DWunion __w; \
19557 + __w.ll = __umulsidi3 (u, v); \
19558 + w1 = __w.s.high; \
19559 + w0 = __w.s.low; \
19560 +}
19561 +
19562 +#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clz (X))
19563 +#define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctz (X))
19564 +#define COUNT_LEADING_ZEROS_0 32
19565 +#endif
19566 +
19567 #if defined (__hppa) && W_TYPE_SIZE == 32
19568 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
19569 __asm__ ("add %4,%5,%1\n\taddc %2,%3,%0" \
19570 diff -Nur gcc-4.1.2/gcc/optabs.h gcc-4.1.2-owrt/gcc/optabs.h
19571 --- gcc-4.1.2/gcc/optabs.h 2005-08-19 23:20:02.000000000 +0200
19572 +++ gcc-4.1.2-owrt/gcc/optabs.h 2007-05-24 12:03:28.000000000 +0200
19573 @@ -415,7 +415,7 @@
19574 extern GTY(()) optab code_to_optab[NUM_RTX_CODE + 1];
19575
19576 \f
19577 -typedef rtx (*rtxfun) (rtx);
19578 +typedef rtx (*rtxfun) (rtx, ...);
19579
19580 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT,...)
19581 gives the gen_function to make a branch to test that condition. */
19582 diff -Nur gcc-4.1.2/libstdc++-v3/acinclude.m4 gcc-4.1.2-owrt/libstdc++-v3/acinclude.m4
19583 --- gcc-4.1.2/libstdc++-v3/acinclude.m4 2007-01-29 11:51:01.000000000 +0100
19584 +++ gcc-4.1.2-owrt/libstdc++-v3/acinclude.m4 2007-05-24 12:03:28.000000000 +0200
19585 @@ -125,6 +125,15 @@
19586 ## other macros from doing the same. This should be automated.) -pme
19587 need_libmath=no
19588
19589 + # Check for uClibc since Linux platforms use different configuration
19590 + # directories depending on the C library in use.
19591 + AC_EGREP_CPP([_using_uclibc], [
19592 + #include <stdio.h>
19593 + #if __UCLIBC__
19594 + _using_uclibc
19595 + #endif
19596 + ], uclibc=yes, uclibc=no)
19597 +
19598 # Find platform-specific directories containing configuration info.
19599 # Also possibly modify flags used elsewhere, as needed by the platform.
19600 GLIBCXX_CHECK_HOST
19601 @@ -1040,8 +1049,8 @@
19602 #endif
19603 int main()
19604 {
19605 - const char __one[] = "Äuglein Augmen";
19606 - const char __two[] = "Äuglein";
19607 + const char __one[] = "�uglein Augmen";
19608 + const char __two[] = "�uglein";
19609 int i;
19610 int j;
19611 __locale_t loc;
19612 @@ -1916,6 +1925,14 @@
19613 ])
19614 ])
19615
19616 +# Macros that should have automatically been included, but...
19617 +m4_include([../config/enable.m4])
19618 +m4_include([../config/lead-dot.m4])
19619 +m4_include([../config/no-executables.m4])
19620 +m4_include([../libtool.m4])
19621 +m4_include([crossconfig.m4])
19622 +m4_include([linkage.m4])
19623 +
19624 # Macros from the top-level gcc directory.
19625 m4_include([../config/tls.m4])
19626
19627 diff -Nur gcc-4.1.2/libstdc++-v3/config/os/gnu-linux/ctype_base.h gcc-4.1.2-owrt/libstdc++-v3/config/os/gnu-linux/ctype_base.h
19628 --- gcc-4.1.2/libstdc++-v3/config/os/gnu-linux/ctype_base.h 2005-08-17 04:28:44.000000000 +0200
19629 +++ gcc-4.1.2-owrt/libstdc++-v3/config/os/gnu-linux/ctype_base.h 2007-05-24 12:03:28.000000000 +0200
19630 @@ -43,8 +43,8 @@
19631 struct ctype_base
19632 {
19633 // Non-standard typedefs.
19634 - typedef const int* __to_type;
19635 -
19636 + typedef const int* __to_type;
19637 +
19638 // NB: Offsets into ctype<char>::_M_table force a particular size
19639 // on the mask type. Because of this, we don't use an enum.
19640 typedef unsigned short mask;
19641 diff -Nur gcc-4.1.2/libstdc++-v3/configure.host gcc-4.1.2-owrt/libstdc++-v3/configure.host
19642 --- gcc-4.1.2/libstdc++-v3/configure.host 2007-01-28 21:12:40.000000000 +0100
19643 +++ gcc-4.1.2-owrt/libstdc++-v3/configure.host 2007-05-24 12:03:28.000000000 +0200
19644 @@ -214,8 +214,15 @@
19645 freebsd*)
19646 os_include_dir="os/bsd/freebsd"
19647 ;;
19648 + linux-uclibc*)
19649 + os_include_dir="os/uclibc-linux"
19650 + ;;
19651 gnu* | linux* | kfreebsd*-gnu | knetbsd*-gnu)
19652 - os_include_dir="os/gnu-linux"
19653 + if [ "$uclibc" = "yes" ]; then
19654 + os_include_dir="os/uclibc"
19655 + else
19656 + os_include_dir="os/gnu-linux"
19657 + fi
19658 ;;
19659 hpux*)
19660 os_include_dir="os/hpux"
19661 diff -Nur gcc-4.1.2/libstdc++-v3/include/Makefile.in gcc-4.1.2-owrt/libstdc++-v3/include/Makefile.in
19662 --- gcc-4.1.2/libstdc++-v3/include/Makefile.in 2006-01-10 18:14:00.000000000 +0100
19663 +++ gcc-4.1.2-owrt/libstdc++-v3/include/Makefile.in 2007-05-24 12:03:28.000000000 +0200
19664 @@ -36,6 +36,7 @@
19665 build_triplet = @build@
19666 host_triplet = @host@
19667 target_triplet = @target@
19668 +LIBOBJDIR =
19669 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
19670 $(top_srcdir)/fragment.am
19671 subdir = include
19672 diff -Nur gcc-4.1.2/libstdc++-v3/libmath/Makefile.in gcc-4.1.2-owrt/libstdc++-v3/libmath/Makefile.in
19673 --- gcc-4.1.2/libstdc++-v3/libmath/Makefile.in 2006-01-10 18:14:00.000000000 +0100
19674 +++ gcc-4.1.2-owrt/libstdc++-v3/libmath/Makefile.in 2007-05-24 12:03:28.000000000 +0200
19675 @@ -37,6 +37,7 @@
19676 build_triplet = @build@
19677 host_triplet = @host@
19678 target_triplet = @target@
19679 +LIBOBJDIR =
19680 subdir = libmath
19681 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
19682 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
19683 diff -Nur gcc-4.1.2/libstdc++-v3/libsupc++/Makefile.in gcc-4.1.2-owrt/libstdc++-v3/libsupc++/Makefile.in
19684 --- gcc-4.1.2/libstdc++-v3/libsupc++/Makefile.in 2006-01-10 18:14:00.000000000 +0100
19685 +++ gcc-4.1.2-owrt/libstdc++-v3/libsupc++/Makefile.in 2007-05-24 12:03:28.000000000 +0200
19686 @@ -38,6 +38,7 @@
19687 build_triplet = @build@
19688 host_triplet = @host@
19689 target_triplet = @target@
19690 +LIBOBJDIR =
19691 DIST_COMMON = $(glibcxxinstall_HEADERS) $(srcdir)/Makefile.am \
19692 $(srcdir)/Makefile.in $(top_srcdir)/fragment.am
19693 subdir = libsupc++
19694 diff -Nur gcc-4.1.2/libstdc++-v3/Makefile.in gcc-4.1.2-owrt/libstdc++-v3/Makefile.in
19695 --- gcc-4.1.2/libstdc++-v3/Makefile.in 2006-01-10 18:14:00.000000000 +0100
19696 +++ gcc-4.1.2-owrt/libstdc++-v3/Makefile.in 2007-05-24 12:03:28.000000000 +0200
19697 @@ -36,6 +36,7 @@
19698 build_triplet = @build@
19699 host_triplet = @host@
19700 target_triplet = @target@
19701 +LIBOBJDIR =
19702 DIST_COMMON = README $(am__configure_deps) $(srcdir)/../config.guess \
19703 $(srcdir)/../config.sub $(srcdir)/../install-sh \
19704 $(srcdir)/../ltmain.sh $(srcdir)/../missing \
19705 diff -Nur gcc-4.1.2/libstdc++-v3/po/Makefile.in gcc-4.1.2-owrt/libstdc++-v3/po/Makefile.in
19706 --- gcc-4.1.2/libstdc++-v3/po/Makefile.in 2006-01-10 18:14:00.000000000 +0100
19707 +++ gcc-4.1.2-owrt/libstdc++-v3/po/Makefile.in 2007-05-24 12:03:28.000000000 +0200
19708 @@ -36,6 +36,7 @@
19709 build_triplet = @build@
19710 host_triplet = @host@
19711 target_triplet = @target@
19712 +LIBOBJDIR =
19713 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
19714 $(top_srcdir)/fragment.am
19715 subdir = po
19716 diff -Nur gcc-4.1.2/libstdc++-v3/src/Makefile.in gcc-4.1.2-owrt/libstdc++-v3/src/Makefile.in
19717 --- gcc-4.1.2/libstdc++-v3/src/Makefile.in 2006-01-10 18:14:00.000000000 +0100
19718 +++ gcc-4.1.2-owrt/libstdc++-v3/src/Makefile.in 2007-05-24 12:03:28.000000000 +0200
19719 @@ -36,6 +36,7 @@
19720 build_triplet = @build@
19721 host_triplet = @host@
19722 target_triplet = @target@
19723 +LIBOBJDIR =
19724 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
19725 $(top_srcdir)/fragment.am
19726 subdir = src