-Index: gcc-4.2.3/configure.in
-===================================================================
---- gcc-4.2.3.orig/configure.in 2007-09-15 02:42:24.000000000 +0200
-+++ gcc-4.2.3/configure.in 2008-05-21 13:45:54.101287819 +0200
-@@ -503,6 +503,9 @@
+--- a/configure.in
++++ b/configure.in
+@@ -503,6 +503,9 @@ case "${target}" in
arm-*-riscix*)
noconfigdirs="$noconfigdirs ld target-libgloss ${libgcj}"
;;
avr-*-*)
noconfigdirs="$noconfigdirs target-libiberty target-libstdc++-v3 ${libgcj}"
;;
-Index: gcc-4.2.3/gcc/builtins.c
-===================================================================
---- gcc-4.2.3.orig/gcc/builtins.c 2008-01-23 11:38:21.000000000 +0100
-+++ gcc-4.2.3/gcc/builtins.c 2008-05-21 13:45:54.109288559 +0200
-@@ -9223,7 +9223,7 @@
+--- a/gcc/builtins.c
++++ b/gcc/builtins.c
+@@ -9223,7 +9223,7 @@ validate_arglist (tree arglist, ...)
do
{
switch (code)
{
case 0:
-Index: gcc-4.2.3/gcc/calls.c
-===================================================================
---- gcc-4.2.3.orig/gcc/calls.c 2007-09-01 17:28:30.000000000 +0200
-+++ gcc-4.2.3/gcc/calls.c 2008-05-21 13:45:54.117288181 +0200
-@@ -3447,7 +3447,7 @@
+--- a/gcc/calls.c
++++ b/gcc/calls.c
+@@ -3447,7 +3447,7 @@ emit_library_call_value_1 (int retval, r
for (; count < nargs; count++)
{
rtx val = va_arg (p, rtx);
/* We cannot convert the arg value to the mode the library wants here;
must do it earlier where we know the signedness of the arg. */
-Index: gcc-4.2.3/gcc/config/avr32/avr32.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/avr32.c 2008-05-21 13:45:54.145288116 +0200
-@@ -0,0 +1,7060 @@
+--- a/gcc/c-incpath.c
++++ b/gcc/c-incpath.c
+@@ -347,6 +347,18 @@ add_path (char *path, int chain, int cxx
+ char* c;
+ for (c = path; *c; c++)
+ if (*c == '\\') *c = '/';
++ /* Remove unnecessary trailing slashes. On some versions of MS
++ Windows, trailing _forward_ slashes cause no problems for stat().
++ On newer versions, stat() does not recognise a directory that ends
++ in a '\\' or '/', unless it is a drive root dir, such as "c:/",
++ where it is obligatory. */
++ int pathlen = strlen (path);
++ char* end = path + pathlen - 1;
++ /* Preserve the lead '/' or lead "c:/". */
++ char* start = path + (pathlen > 2 && path[1] == ':' ? 3 : 1);
++
++ for (; end > start && IS_DIR_SEPARATOR (*end); end--)
++ *end = 0;
+ #endif
+
+ p = XNEW (cpp_dir);
+--- /dev/null
++++ b/gcc/config/avr32/avr32.c
+@@ -0,0 +1,7915 @@
+/*
+ Target hooks and helper functions for AVR32.
+ Copyright 2003-2006 Atmel Corporation.
+static tree avr32_handle_isr_attribute (tree *, tree, tree, int, bool *);
+static tree avr32_handle_acall_attribute (tree *, tree, tree, int, bool *);
+static tree avr32_handle_fndecl_attribute (tree * node, tree name, tree args,
-+ int flags, bool * no_add_attrs);
++ int flags, bool * no_add_attrs);
+static void avr32_reorg (void);
+bool avr32_return_in_msb (tree type);
+bool avr32_vector_mode_supported (enum machine_mode mode);
+
+static void
+avr32_add_gc_roots (void)
-+ {
-+ gcc_obstack_init (&minipool_obstack);
-+ minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
-+ }
++{
++ gcc_obstack_init (&minipool_obstack);
++ minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
++}
+
+
+/* List of all known AVR32 parts */
+static const struct part_type_s avr32_part_types[] = {
-+ /* name, part_type, architecture type, macro */
-+ {"none", PART_TYPE_AVR32_NONE, ARCH_TYPE_AVR32_AP, "__AVR32__"},
-+ {"ap7000", PART_TYPE_AVR32_AP7000, ARCH_TYPE_AVR32_AP, "__AVR32_AP7000__"},
-+ {"ap7010", PART_TYPE_AVR32_AP7010, ARCH_TYPE_AVR32_AP, "__AVR32_AP7010__"},
-+ {"ap7020", PART_TYPE_AVR32_AP7020, ARCH_TYPE_AVR32_AP, "__AVR32_AP7020__"},
-+ {"uc3a0256", PART_TYPE_AVR32_UC3A0256, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3A0256__"},
-+ {"uc3a0512", PART_TYPE_AVR32_UC3A0512, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3A0512__"},
-+ {"uc3a1128", PART_TYPE_AVR32_UC3A1128, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3A1128__"},
-+ {"uc3a1256", PART_TYPE_AVR32_UC3A1256, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3A1256__"},
-+ {"uc3a1512", PART_TYPE_AVR32_UC3A1512, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3A1512__"},
-+ {"uc3b064", PART_TYPE_AVR32_UC3B064, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3B064__"},
-+ {"uc3b0128", PART_TYPE_AVR32_UC3B0128, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3B0128__"},
-+ {"uc3b0256", PART_TYPE_AVR32_UC3B0256, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3B0256__"},
-+ {"uc3b164", PART_TYPE_AVR32_UC3B164, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3B164__"},
-+ {"uc3b1128", PART_TYPE_AVR32_UC3B1128, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3B1128__"},
-+ {"uc3b1256", PART_TYPE_AVR32_UC3B1256, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3B1256__"},
-+ {NULL, 0, 0, NULL}
++ /* name, part_type, architecture type, macro */
++ {"none", PART_TYPE_AVR32_NONE, ARCH_TYPE_AVR32_AP, "__AVR32__"},
++ {"ap7000", PART_TYPE_AVR32_AP7000, ARCH_TYPE_AVR32_AP, "__AVR32_AP7000__"},
++ {"ap7001", PART_TYPE_AVR32_AP7001, ARCH_TYPE_AVR32_AP, "__AVR32_AP7001__"},
++ {"ap7002", PART_TYPE_AVR32_AP7002, ARCH_TYPE_AVR32_AP, "__AVR32_AP7002__"},
++ {"ap7200", PART_TYPE_AVR32_AP7200, ARCH_TYPE_AVR32_AP, "__AVR32_AP7200__"},
++ {"uc3a0128", PART_TYPE_AVR32_UC3A0128, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A0128__"},
++ {"uc3a0256", PART_TYPE_AVR32_UC3A0256, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A0256__"},
++ {"uc3a0512", PART_TYPE_AVR32_UC3A0512, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A0512__"},
++ {"uc3a0512es", PART_TYPE_AVR32_UC3A0512ES, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3A0512ES__"},
++ {"uc3a1128", PART_TYPE_AVR32_UC3A1128, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A1128__"},
++ {"uc3a1256", PART_TYPE_AVR32_UC3A1256, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A1256__"},
++ {"uc3a1512", PART_TYPE_AVR32_UC3A1512, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A1512__"},
++ {"uc3a1512es", PART_TYPE_AVR32_UC3A1512ES, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3A1512ES__"},
++ {"uc3a3revd", PART_TYPE_AVR32_UC3A3REVD, ARCH_TYPE_AVR32_UCR2NOMUL,
++ "__AVR32_UC3A3256S__"},
++ {"uc3a364", PART_TYPE_AVR32_UC3A364, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A364__"},
++ {"uc3a364s", PART_TYPE_AVR32_UC3A364S, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A364S__"},
++ {"uc3a3128", PART_TYPE_AVR32_UC3A3128, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A3128__"},
++ {"uc3a3128s", PART_TYPE_AVR32_UC3A3128S, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A3128S__"},
++ {"uc3a3256", PART_TYPE_AVR32_UC3A3256, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A3256__"},
++ {"uc3a3256s", PART_TYPE_AVR32_UC3A3256S, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A3256S__"},
++ {"uc3b064", PART_TYPE_AVR32_UC3B064, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3B064__"},
++ {"uc3b0128", PART_TYPE_AVR32_UC3B0128, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3B0128__"},
++ {"uc3b0256", PART_TYPE_AVR32_UC3B0256, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3B0256__"},
++ {"uc3b0256es", PART_TYPE_AVR32_UC3B0256ES, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3B0256ES__"},
++ {"uc3b164", PART_TYPE_AVR32_UC3B164, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3B164__"},
++ {"uc3b1128", PART_TYPE_AVR32_UC3B1128, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3B1128__"},
++ {"uc3b1256", PART_TYPE_AVR32_UC3B1256, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3B1256__"},
++ {"uc3b1256es", PART_TYPE_AVR32_UC3B1256ES, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3B1256ES__"},
++ {NULL, 0, 0, NULL}
+};
+
+/* List of all known AVR32 architectures */
+static const struct arch_type_s avr32_arch_types[] = {
-+ /* name, architecture type, microarchitecture type, feature flags, macro */
-+ {"ap", ARCH_TYPE_AVR32_AP, UARCH_TYPE_AVR32B,
-+ (FLAG_AVR32_HAS_DSP
-+ | FLAG_AVR32_HAS_SIMD
-+ | FLAG_AVR32_HAS_UNALIGNED_WORD
-+ | FLAG_AVR32_HAS_CACHES
-+ | FLAG_AVR32_HAS_BRANCH_PRED
-+ | FLAG_AVR32_HAS_RETURN_STACK),
-+ "__AVR32_AP__"},
-+ {"uc", ARCH_TYPE_AVR32_UC, UARCH_TYPE_AVR32A,
-+ (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW),
-+ "__AVR32_UC__"},
-+ {NULL, 0, 0, 0, NULL}
++ /* name, architecture type, microarchitecture type, feature flags, macro */
++ {"ap", ARCH_TYPE_AVR32_AP, UARCH_TYPE_AVR32B,
++ (FLAG_AVR32_HAS_DSP
++ | FLAG_AVR32_HAS_SIMD
++ | FLAG_AVR32_HAS_UNALIGNED_WORD
++ | FLAG_AVR32_HAS_BRANCH_PRED | FLAG_AVR32_HAS_RETURN_STACK
++ | FLAG_AVR32_HAS_CACHES),
++ "__AVR32_AP__"},
++ {"ucr1", ARCH_TYPE_AVR32_UCR1, UARCH_TYPE_AVR32A,
++ (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW),
++ "__AVR32_UC__=1"},
++ {"ucr2", ARCH_TYPE_AVR32_UCR2, UARCH_TYPE_AVR32A,
++ (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
++ | FLAG_AVR32_HAS_V2_INSNS),
++ "__AVR32_UC__=2"},
++ {"ucr2nomul", ARCH_TYPE_AVR32_UCR2NOMUL, UARCH_TYPE_AVR32A,
++ (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
++ | FLAG_AVR32_HAS_V2_INSNS | FLAG_AVR32_HAS_NO_MUL_INSNS),
++ "__AVR32_UC__=3"},
++ {NULL, 0, 0, 0, NULL}
+};
+
+/* Default arch name */
-+const char *avr32_arch_name = "ap";
++const char *avr32_arch_name = "none";
+const char *avr32_part_name = "none";
+
+const struct part_type_s *avr32_part;
+const struct arch_type_s *avr32_arch;
+
++
+/* Set default target_flags. */
+#undef TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS \
-+ (MASK_HAS_ASM_ADDR_PSEUDOS | MASK_MD_REORG_OPTIMIZATION)
++ (MASK_HAS_ASM_ADDR_PSEUDOS | MASK_MD_REORG_OPTIMIZATION | MASK_COND_EXEC_BEFORE_RELOAD)
+
-+void
++void
+avr32_optimization_options (int level,
+ int size){
+ if (AVR32_ALWAYS_PIC)
+/* Override command line options */
+void
+avr32_override_options (void)
-+ {
-+ const struct part_type_s *part;
-+ const struct arch_type_s *arch;
-+
-+ /* Check if part type is set. */
-+ for (part = avr32_part_types; part->name; part++)
-+ if (strcmp (part->name, avr32_part_name) == 0)
-+ break;
++{
++ const struct part_type_s *part;
++ const struct arch_type_s *arch;
+
-+ avr32_part = part;
++ /*Add backward compatibility*/
++ if (strcmp ("uc", avr32_arch_name)== 0)
++ {
++ fprintf (stderr, "Warning: Deprecated arch `%s' specified. "
++ "Please use '-march=ucr1' instead. "
++ "Converting to arch 'ucr1'\n",
++ avr32_arch_name);
++ avr32_arch_name="ucr1";
++ }
+
-+ if (!part->name)
-+ {
-+ fprintf (stderr, "Unknown part `%s' specified\nKnown part names:\n",
-+ avr32_part_name);
-+ for (part = avr32_part_types; part->name; part++)
-+ fprintf (stderr, "\t%s\n", part->name);
-+ avr32_part = &avr32_part_types[PART_TYPE_AVR32_NONE];
-+ }
++ /* Check if arch type is set. */
++ for (arch = avr32_arch_types; arch->name; arch++)
++ {
++ if (strcmp (arch->name, avr32_arch_name) == 0)
++ break;
++ }
++ avr32_arch = arch;
+
-+ avr32_arch = &avr32_arch_types[avr32_part->arch_type];
++ if (!arch->name && strcmp("none", avr32_arch_name) != 0)
++ {
++ fprintf (stderr, "Unknown arch `%s' specified\n"
++ "Known arch names:\n"
++ "\tuc (deprecated)\n",
++ avr32_arch_name);
++ for (arch = avr32_arch_types; arch->name; arch++)
++ fprintf (stderr, "\t%s\n", arch->name);
++ avr32_arch = &avr32_arch_types[ARCH_TYPE_AVR32_AP];
++ }
+
-+ /* If part was set to "none" then check if arch was set. */
-+ if (strcmp (avr32_part->name, "none") == 0)
-+ {
-+ /* Check if arch type is set. */
-+ for (arch = avr32_arch_types; arch->name; arch++)
-+ if (strcmp (arch->name, avr32_arch_name) == 0)
-+ break;
++ /* Check if part type is set. */
++ for (part = avr32_part_types; part->name; part++)
++ if (strcmp (part->name, avr32_part_name) == 0)
++ break;
+
-+ avr32_arch = arch;
++ avr32_part = part;
++ if (!part->name)
++ {
++ fprintf (stderr, "Unknown part `%s' specified\nKnown part names:\n",
++ avr32_part_name);
++ for (part = avr32_part_types; part->name; part++)
++ {
++ if (strcmp("none", part->name) != 0)
++ fprintf (stderr, "\t%s\n", part->name);
++ }
++ /* Set default to NONE*/
++ avr32_part = &avr32_part_types[PART_TYPE_AVR32_NONE];
++ }
+
-+ if (!arch->name)
-+ {
-+ fprintf (stderr, "Unknown arch `%s' specified\nKnown arch names:\n",
-+ avr32_arch_name);
-+ for (arch = avr32_arch_types; arch->name; arch++)
-+ fprintf (stderr, "\t%s\n", arch->name);
-+ avr32_arch = &avr32_arch_types[ARCH_TYPE_AVR32_AP];
-+ }
-+ }
++ /* NB! option -march= overrides option -mpart
++ * if both are used at the same time */
++ if (!arch->name)
++ avr32_arch = &avr32_arch_types[avr32_part->arch_type];
+
-+ /* If optimization level is two or greater, then align start of loops to a
++ /* If optimization level is two or greater, then align start of loops to a
+ word boundary since this will allow folding the first insn of the loop.
+ Do this only for targets supporting branch prediction. */
-+ if (optimize >= 2 && TARGET_BRANCH_PRED)
-+ align_loops = 2;
-+
++ if (optimize >= 2 && TARGET_BRANCH_PRED)
++ align_loops = 2;
+
-+ /* Enable section anchors if optimization is enabled. */
-+ if (optimize > 0 || optimize_size)
-+ flag_section_anchors = 1;
+
-+ /* Enable fast-float library if unsafe math optimizations
++ /* Enable fast-float library if unsafe math optimizations
+ are used. */
-+ if (flag_unsafe_math_optimizations)
-+ target_flags |= MASK_FAST_FLOAT;
++ if (flag_unsafe_math_optimizations)
++ target_flags |= MASK_FAST_FLOAT;
+
-+ /* Check if we should set avr32_imm_in_const_pool
-+ based on if caches are present or not. */
-+ if ( avr32_imm_in_const_pool == -1 )
-+ {
-+ if ( TARGET_CACHES )
-+ avr32_imm_in_const_pool = 1;
-+ else
-+ avr32_imm_in_const_pool = 0;
-+ }
-+
-+ avr32_add_gc_roots ();
-+ }
++ /* Check if we should set avr32_imm_in_const_pool
++ based on if caches are present or not. */
++ if ( avr32_imm_in_const_pool == -1 )
++ {
++ if ( TARGET_CACHES )
++ avr32_imm_in_const_pool = 1;
++ else
++ avr32_imm_in_const_pool = 0;
++ }
++
++ if (TARGET_NO_PIC)
++ flag_pic = 0;
++
++ avr32_add_gc_roots ();
++}
+
+
+/*
+compatibility reasons. Except in cases where required by standard
+or by a debugger, there is no reason why the stack layout used by GCC
+need agree with that used by other compilers for a machine.
-+ */
++*/
+
+#undef TARGET_ASM_FUNCTION_PROLOGUE
+#define TARGET_ASM_FUNCTION_PROLOGUE avr32_target_asm_function_prologue
+#undef TARGET_RETURN_IN_MSB
+#define TARGET_RETURN_IN_MSB avr32_return_in_msb
+
++#undef TARGET_ENCODE_SECTION_INFO
++#define TARGET_ENCODE_SECTION_INFO avr32_encode_section_info
++
+#undef TARGET_ARG_PARTIAL_BYTES
+#define TARGET_ARG_PARTIAL_BYTES avr32_arg_partial_bytes
+
+#undef TARGET_MAX_ANCHOR_OFFSET
+#define TARGET_MAX_ANCHOR_OFFSET ((1 << 15) - 1)
+
++#undef TARGET_SECONDARY_RELOAD
++#define TARGET_SECONDARY_RELOAD avr32_secondary_reload
++
++enum reg_class
++avr32_secondary_reload (bool in_p, rtx x, enum reg_class class ATTRIBUTE_UNUSED,
++ enum machine_mode mode, secondary_reload_info *sri)
++{
++
++ if ( avr32_rmw_memory_operand (x, mode) )
++ {
++ if (!in_p)
++ sri->icode = CODE_FOR_reload_out_rmw_memory_operand;
++ else
++ sri->icode = CODE_FOR_reload_in_rmw_memory_operand;
++ }
++ return NO_REGS;
++
++}
+
+/*
+ * Switches to the appropriate section for output of constant pool
+ Need to handle integer vectors */
+static bool
+avr32_assemble_integer (rtx x, unsigned int size, int aligned_p)
-+ {
-+ if (avr32_vector_mode_supported (GET_MODE (x)))
-+ {
-+ int i, units;
-+
-+ if (GET_CODE (x) != CONST_VECTOR)
-+ abort ();
-+
-+ units = CONST_VECTOR_NUNITS (x);
-+
-+ switch (GET_MODE (x))
-+ {
-+ case V2HImode:
-+ size = 2;
-+ break;
-+ case V4QImode:
-+ size = 1;
-+ break;
-+ default:
-+ abort ();
-+ }
++{
++ if (avr32_vector_mode_supported (GET_MODE (x)))
++ {
++ int i, units;
++
++ if (GET_CODE (x) != CONST_VECTOR)
++ abort ();
++
++ units = CONST_VECTOR_NUNITS (x);
++
++ switch (GET_MODE (x))
++ {
++ case V2HImode:
++ size = 2;
++ break;
++ case V4QImode:
++ size = 1;
++ break;
++ default:
++ abort ();
++ }
+
-+ for (i = 0; i < units; i++)
-+ {
-+ rtx elt;
++ for (i = 0; i < units; i++)
++ {
++ rtx elt;
+
-+ elt = CONST_VECTOR_ELT (x, i);
-+ assemble_integer (elt, size, i == 0 ? 32 : size * BITS_PER_UNIT, 1);
-+ }
++ elt = CONST_VECTOR_ELT (x, i);
++ assemble_integer (elt, size, i == 0 ? 32 : size * BITS_PER_UNIT, 1);
++ }
+
-+ return true;
-+ }
++ return true;
++ }
+
-+ return default_assemble_integer (x, size, aligned_p);
-+ }
++ return default_assemble_integer (x, size, aligned_p);
++}
+
+/*
+ * This target hook describes the relative costs of RTL expressions.
+/* Worker routine for avr32_rtx_costs. */
+static inline int
+avr32_rtx_costs_1 (rtx x, enum rtx_code code ATTRIBUTE_UNUSED,
-+ enum rtx_code outer ATTRIBUTE_UNUSED)
-+ {
-+ enum machine_mode mode = GET_MODE (x);
++ enum rtx_code outer ATTRIBUTE_UNUSED)
++{
++ enum machine_mode mode = GET_MODE (x);
+
-+ switch (GET_CODE (x))
++ switch (GET_CODE (x))
+ {
+ case MEM:
+ /* Using pre decrement / post increment memory operations on the
+ avr32_uc architecture means that two writebacks must be performed
+ and hence two cycles are needed. */
+ if (!optimize_size
-+ && GET_MODE_SIZE (mode) <= 2 * UNITS_PER_WORD
-+ && avr32_arch->arch_type == ARCH_TYPE_AVR32_UC
-+ && (GET_CODE (XEXP (x, 0)) == PRE_DEC
-+ || GET_CODE (XEXP (x, 0)) == POST_INC))
-+ return COSTS_N_INSNS (5);
++ && GET_MODE_SIZE (mode) <= 2 * UNITS_PER_WORD
++ && TARGET_ARCH_UC
++ && (GET_CODE (XEXP (x, 0)) == PRE_DEC
++ || GET_CODE (XEXP (x, 0)) == POST_INC))
++ return COSTS_N_INSNS (5);
+
+ /* Memory costs quite a lot for the first word, but subsequent words
+ load at the equivalent of a single insn each. */
+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
-+ return COSTS_N_INSNS (3 + (GET_MODE_SIZE (mode) / UNITS_PER_WORD));
++ return COSTS_N_INSNS (3 + (GET_MODE_SIZE (mode) / UNITS_PER_WORD));
+
+ return COSTS_N_INSNS (4);
+ case SYMBOL_REF:
+ case ROTATE:
+ case ROTATERT:
+ if (mode == TImode)
-+ return COSTS_N_INSNS (100);
++ return COSTS_N_INSNS (100);
+
+ if (mode == DImode)
-+ return COSTS_N_INSNS (10);
++ return COSTS_N_INSNS (10);
+ return COSTS_N_INSNS (4);
+ case ASHIFT:
+ case LSHIFTRT:
+ case ASHIFTRT:
+ case NOT:
+ if (mode == TImode)
-+ return COSTS_N_INSNS (10);
++ return COSTS_N_INSNS (10);
+
+ if (mode == DImode)
-+ return COSTS_N_INSNS (4);
++ return COSTS_N_INSNS (4);
+ return COSTS_N_INSNS (1);
+ case PLUS:
+ case MINUS:
+ case COMPARE:
+ case ABS:
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
-+ return COSTS_N_INSNS (100);
++ return COSTS_N_INSNS (100);
+
+ if (mode == TImode)
-+ return COSTS_N_INSNS (50);
++ return COSTS_N_INSNS (50);
+
+ if (mode == DImode)
-+ return COSTS_N_INSNS (2);
++ return COSTS_N_INSNS (2);
+ return COSTS_N_INSNS (1);
+
+ case MULT:
-+ {
-+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
-+ return COSTS_N_INSNS (300);
++ {
++ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
++ return COSTS_N_INSNS (300);
+
-+ if (mode == TImode)
-+ return COSTS_N_INSNS (16);
++ if (mode == TImode)
++ return COSTS_N_INSNS (16);
+
-+ if (mode == DImode)
-+ return COSTS_N_INSNS (4);
++ if (mode == DImode)
++ return COSTS_N_INSNS (4);
+
-+ if (mode == HImode)
-+ return COSTS_N_INSNS (2);
++ if (mode == HImode)
++ return COSTS_N_INSNS (2);
+
-+ return COSTS_N_INSNS (3);
-+ }
++ return COSTS_N_INSNS (3);
++ }
+ case IF_THEN_ELSE:
+ if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
-+ return COSTS_N_INSNS (4);
++ return COSTS_N_INSNS (4);
+ return COSTS_N_INSNS (1);
+ case SIGN_EXTEND:
+ case ZERO_EXTEND:
+ instrcutions only take one register operand which means that gcc
+ often must insert some move instrcutions */
+ if (mode == QImode || mode == HImode)
-+ return (COSTS_N_INSNS (GET_CODE (XEXP (x, 0)) == MEM ? 0 : 1));
++ return (COSTS_N_INSNS (GET_CODE (XEXP (x, 0)) == MEM ? 0 : 1));
+ return COSTS_N_INSNS (4);
+ case UNSPEC:
+ /* divmod operations */
+ if (XINT (x, 1) == UNSPEC_UDIVMODSI4_INTERNAL
-+ || XINT (x, 1) == UNSPEC_DIVMODSI4_INTERNAL)
-+ {
-+ return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
-+ }
++ || XINT (x, 1) == UNSPEC_DIVMODSI4_INTERNAL)
++ {
++ return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
++ }
+ /* Fallthrough */
+ default:
+ return COSTS_N_INSNS (1);
+ }
-+ }
++}
+
+static bool
+avr32_rtx_costs (rtx x, int code, int outer_code, int *total)
-+ {
-+ *total = avr32_rtx_costs_1 (x, code, outer_code);
-+ return true;
-+ }
++{
++ *total = avr32_rtx_costs_1 (x, code, outer_code);
++ return true;
++}
+
+
+bool
+avr32_cannot_force_const_mem (rtx x ATTRIBUTE_UNUSED)
-+ {
-+ /* Do not want symbols in the constant pool when compiling pic or if using
++{
++ /* Do not want symbols in the constant pool when compiling pic or if using
+ address pseudo instructions. */
-+ return ((flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
-+ && avr32_find_symbol (x) != NULL_RTX);
-+ }
++ return ((flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
++ && avr32_find_symbol (x) != NULL_RTX);
++}
+
+
+/* Table of machine attributes. */
+const struct attribute_spec avr32_attribute_table[] = {
-+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
-+ /* Interrupt Service Routines have special prologue and epilogue
++ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
++ /* Interrupt Service Routines have special prologue and epilogue
+ requirements. */
-+ {"isr", 0, 1, false, false, false, avr32_handle_isr_attribute},
-+ {"interrupt", 0, 1, false, false, false, avr32_handle_isr_attribute},
-+ {"acall", 0, 1, false, true, true, avr32_handle_acall_attribute},
-+ {"naked", 0, 0, true, false, false, avr32_handle_fndecl_attribute},
-+ {NULL, 0, 0, false, false, false, NULL}
++ {"isr", 0, 1, false, false, false, avr32_handle_isr_attribute},
++ {"interrupt", 0, 1, false, false, false, avr32_handle_isr_attribute},
++ {"acall", 0, 1, false, true, true, avr32_handle_acall_attribute},
++ {"naked", 0, 0, true, false, false, avr32_handle_fndecl_attribute},
++ {"rmw_addressable", 0, 0, true, false, false, NULL},
++ {NULL, 0, 0, false, false, false, NULL}
+};
+
+
+isr_attribute_arg;
+
+static const isr_attribute_arg isr_attribute_args[] = {
-+ {"FULL", AVR32_FT_ISR_FULL},
-+ {"full", AVR32_FT_ISR_FULL},
-+ {"HALF", AVR32_FT_ISR_HALF},
-+ {"half", AVR32_FT_ISR_HALF},
-+ {"NONE", AVR32_FT_ISR_NONE},
-+ {"none", AVR32_FT_ISR_NONE},
-+ {"UNDEF", AVR32_FT_ISR_NONE},
-+ {"undef", AVR32_FT_ISR_NONE},
-+ {"SWI", AVR32_FT_ISR_NONE},
-+ {"swi", AVR32_FT_ISR_NONE},
-+ {NULL, AVR32_FT_ISR_NONE}
++ {"FULL", AVR32_FT_ISR_FULL},
++ {"full", AVR32_FT_ISR_FULL},
++ {"HALF", AVR32_FT_ISR_HALF},
++ {"half", AVR32_FT_ISR_HALF},
++ {"NONE", AVR32_FT_ISR_NONE},
++ {"none", AVR32_FT_ISR_NONE},
++ {"UNDEF", AVR32_FT_ISR_NONE},
++ {"undef", AVR32_FT_ISR_NONE},
++ {"SWI", AVR32_FT_ISR_NONE},
++ {"swi", AVR32_FT_ISR_NONE},
++ {NULL, AVR32_FT_ISR_NONE}
+};
+
+/* Returns the (interrupt) function type of the current
+
+static unsigned long
+avr32_isr_value (tree argument)
-+ {
-+ const isr_attribute_arg *ptr;
-+ const char *arg;
++{
++ const isr_attribute_arg *ptr;
++ const char *arg;
+
-+ /* No argument - default to ISR_NONE. */
-+ if (argument == NULL_TREE)
-+ return AVR32_FT_ISR_NONE;
++ /* No argument - default to ISR_NONE. */
++ if (argument == NULL_TREE)
++ return AVR32_FT_ISR_NONE;
+
-+ /* Get the value of the argument. */
-+ if (TREE_VALUE (argument) == NULL_TREE
-+ || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
-+ return AVR32_FT_UNKNOWN;
++ /* Get the value of the argument. */
++ if (TREE_VALUE (argument) == NULL_TREE
++ || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
++ return AVR32_FT_UNKNOWN;
+
-+ arg = TREE_STRING_POINTER (TREE_VALUE (argument));
++ arg = TREE_STRING_POINTER (TREE_VALUE (argument));
+
-+ /* Check it against the list of known arguments. */
-+ for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
-+ if (streq (arg, ptr->arg))
-+ return ptr->return_value;
++ /* Check it against the list of known arguments. */
++ for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
++ if (streq (arg, ptr->arg))
++ return ptr->return_value;
+
-+ /* An unrecognized interrupt type. */
-+ return AVR32_FT_UNKNOWN;
-+ }
++ /* An unrecognized interrupt type. */
++ return AVR32_FT_UNKNOWN;
++}
+
+
+
+The compiler will print these strings at the start of a new line,
+followed immediately by the object's initial value. In most cases,
+the string should contain a tab, a pseudo-op, and then another tab.
-+ */
++*/
+#undef TARGET_ASM_BYTE_OP
+#define TARGET_ASM_BYTE_OP "\t.byte\t"
+#undef TARGET_ASM_ALIGNED_HI_OP
+
+
+ if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
-+ || vcall_offset
-+ || flag_pic)
++ || vcall_offset)
+ {
+ fputs ("\tpushm\tlr\n", file);
+ }
+ {
+ if (avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21"))
+ {
-+ fprintf (file, "\tsub\t%s, -0x%x\n", reg_names[this_regno],
-+ mi_delta);
++ fprintf (file, "\tsub\t%s, %d\n", reg_names[this_regno], -mi_delta);
+ }
+ else
+ {
+ /* Immediate is larger than k21 we must make us a temp register by
+ pushing a register to the stack. */
-+ fprintf (file, "\tmov\tlr, lo(%x)\n", mi_delta);
-+ fprintf (file, "\torh\tlr, hi(%x)\n", mi_delta);
++ fprintf (file, "\tmov\tlr, lo(%d)\n", mi_delta);
++ fprintf (file, "\torh\tlr, hi(%d)\n", mi_delta);
+ fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]);
+ }
+ }
+ }
+
+
-+ if ( (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
-+ || vcall_offset)
-+ && !flag_pic )
++ if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
++ || vcall_offset)
+ {
+ fputs ("\tpopm\tlr\n", file);
+ }
+
-+ if (flag_pic)
-+ {
-+ /* Load the got into lr and then load the pointer
-+ to the function from the got and put it on the stack.
-+ We can then call the function and restore lr by issuing
-+ a doubleword load from the stack. We do not use a popm/ldm
-+ since it will be treated as a return and might need a flushing
-+ of the return-stack if available. */
-+ rtx label = gen_label_rtx ();
-+ /* Load the got. */
-+ fputs ("\tlddpc\tlr, 0f\n", file);
-+ (*targetm.asm_out.internal_label) (file, "L",
-+ CODE_LABEL_NUMBER (label));
-+ fputs ("\trsub\tlr, pc\n", file);
-+ /* Load the function pointer. */
-+ fputs ("\tld.w\tlr, lr[", file);
-+ assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
-+ fputs ("@got]\n", file);
-+ /* Push the function pointer on the stack.*/
-+ fputs ("\tpushm\tlr\n", file);
-+ /* Restore the old lr value and load the function pointer into
-+ pc. */
-+ fputs ("\tld.d\tlr,sp++\n", file);
-+ fprintf (file, "\t.align 2\n");
-+ fprintf (file, "0:\t.long\t.L%d - _GLOBAL_OFFSET_TABLE_\n", CODE_LABEL_NUMBER (label));
-+ }
-+ else
-+ {
-+ fprintf (file, "\tlddpc\tpc, 0f\n");
-+ fprintf (file, "\t.align 2\n");
-+ fputs ("0:\t.long\t", file);
-+ assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
-+ fputc ('\n', file);
-+ }
++ /* Jump to the function. We assume that we can use an rjmp since the
++ function to jump to is local and probably not too far away from
++ the thunk. If this assumption proves to be wrong we could implement
++ this jump by calculating the offset between the jump source and destination
++ and put this in the constant pool and then perform an add to pc.
++ This would also be legitimate PIC code. But for now we hope that an rjmp
++ will be sufficient...
++ */
++ fputs ("\trjmp\t", file);
++ assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
++ fputc ('\n', file);
+ }
+
++
+/* Implements target hook vector_mode_supported. */
+bool
+avr32_vector_mode_supported (enum machine_mode mode)
-+ {
-+ if ((mode == V2HImode) || (mode == V4QImode))
-+ return true;
++{
++ if ((mode == V2HImode) || (mode == V4QImode))
++ return true;
+
-+ return false;
-+ }
++ return false;
++}
+
+
+#undef TARGET_INIT_LIBFUNCS
+#define TARGET_EXPAND_BUILTIN avr32_expand_builtin
+
+tree int_ftype_int, int_ftype_void, short_ftype_short, void_ftype_int_int,
-+void_ftype_ptr_int;
++ void_ftype_ptr_int;
+tree void_ftype_int, void_ftype_void, int_ftype_ptr_int;
+tree short_ftype_short, int_ftype_int_short, int_ftype_short_short,
-+short_ftype_short_short;
++ short_ftype_short_short;
+tree int_ftype_int_int, longlong_ftype_int_short, longlong_ftype_short_short;
+tree void_ftype_int_int_int_int_int, void_ftype_int_int_int;
+tree longlong_ftype_int_int, void_ftype_int_int_longlong;
+
+#define def_builtin(NAME, TYPE, CODE) \
+ lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
-+ BUILT_IN_MD, NULL, NULL_TREE)
++ BUILT_IN_MD, NULL, NULL_TREE)
+
+#define def_mbuiltin(MASK, NAME, TYPE, CODE) \
+ do \
-+ { \
-+ if ((MASK)) \
-+ lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
-+ BUILT_IN_MD, NULL, NULL_TREE); \
-+ } \
++ { \
++ if ((MASK)) \
++ lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
++ BUILT_IN_MD, NULL, NULL_TREE); \
++ } \
+ while (0)
+
+struct builtin_description
+ { 1, CODE_FOR_##code, "__builtin_" #code , \
+ AVR32_BUILTIN_##builtin, 0, 0, ftype }
+
-+ DSP_BUILTIN (mulsathh_h, MULSATHH_H, &short_ftype_short_short),
-+ DSP_BUILTIN (mulsathh_w, MULSATHH_W, &int_ftype_short_short),
-+ DSP_BUILTIN (mulsatrndhh_h, MULSATRNDHH_H, &short_ftype_short_short),
-+ DSP_BUILTIN (mulsatrndwh_w, MULSATRNDWH_W, &int_ftype_int_short),
-+ DSP_BUILTIN (mulsatwh_w, MULSATWH_W, &int_ftype_int_short),
-+ DSP_BUILTIN (satadd_h, SATADD_H, &short_ftype_short_short),
-+ DSP_BUILTIN (satsub_h, SATSUB_H, &short_ftype_short_short),
-+ DSP_BUILTIN (satadd_w, SATADD_W, &int_ftype_int_int),
-+ DSP_BUILTIN (satsub_w, SATSUB_W, &int_ftype_int_int),
-+ DSP_BUILTIN (mulwh_d, MULWH_D, &longlong_ftype_int_short),
-+ DSP_BUILTIN (mulnwh_d, MULNWH_D, &longlong_ftype_int_short)
++ DSP_BUILTIN (mulsathh_h, MULSATHH_H, &short_ftype_short_short),
++ DSP_BUILTIN (mulsathh_w, MULSATHH_W, &int_ftype_short_short),
++ DSP_BUILTIN (mulsatrndhh_h, MULSATRNDHH_H, &short_ftype_short_short),
++ DSP_BUILTIN (mulsatrndwh_w, MULSATRNDWH_W, &int_ftype_int_short),
++ DSP_BUILTIN (mulsatwh_w, MULSATWH_W, &int_ftype_int_short),
++ DSP_BUILTIN (satadd_h, SATADD_H, &short_ftype_short_short),
++ DSP_BUILTIN (satsub_h, SATSUB_H, &short_ftype_short_short),
++ DSP_BUILTIN (satadd_w, SATADD_W, &int_ftype_int_int),
++ DSP_BUILTIN (satsub_w, SATSUB_W, &int_ftype_int_int),
++ DSP_BUILTIN (mulwh_d, MULWH_D, &longlong_ftype_int_short),
++ DSP_BUILTIN (mulnwh_d, MULNWH_D, &longlong_ftype_int_short)
+};
+
+
+void
+avr32_init_builtins (void)
-+ {
-+ unsigned int i;
-+ const struct builtin_description *d;
-+ tree endlink = void_list_node;
-+ tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
-+ tree longlong_endlink =
-+ tree_cons (NULL_TREE, long_long_integer_type_node, endlink);
-+ tree short_endlink =
-+ tree_cons (NULL_TREE, short_integer_type_node, endlink);
-+ tree void_endlink = tree_cons (NULL_TREE, void_type_node, endlink);
-+
-+ /* int func (int) */
-+ int_ftype_int = build_function_type (integer_type_node, int_endlink);
-+
-+ /* short func (short) */
-+ short_ftype_short
++{
++ unsigned int i;
++ const struct builtin_description *d;
++ tree endlink = void_list_node;
++ tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
++ tree longlong_endlink =
++ tree_cons (NULL_TREE, long_long_integer_type_node, endlink);
++ tree short_endlink =
++ tree_cons (NULL_TREE, short_integer_type_node, endlink);
++ tree void_endlink = tree_cons (NULL_TREE, void_type_node, endlink);
++
++ /* int func (int) */
++ int_ftype_int = build_function_type (integer_type_node, int_endlink);
++
++ /* short func (short) */
++ short_ftype_short
+ = build_function_type (short_integer_type_node, short_endlink);
+
-+ /* short func (short, short) */
-+ short_ftype_short_short
++ /* short func (short, short) */
++ short_ftype_short_short
+ = build_function_type (short_integer_type_node,
-+ tree_cons (NULL_TREE, short_integer_type_node,
-+ short_endlink));
++ tree_cons (NULL_TREE, short_integer_type_node,
++ short_endlink));
+
-+ /* long long func (long long, short, short) */
-+ longlong_ftype_longlong_short_short
++ /* long long func (long long, short, short) */
++ longlong_ftype_longlong_short_short
+ = build_function_type (long_long_integer_type_node,
-+ tree_cons (NULL_TREE, long_long_integer_type_node,
-+ tree_cons (NULL_TREE,
-+ short_integer_type_node,
-+ short_endlink)));
++ tree_cons (NULL_TREE, long_long_integer_type_node,
++ tree_cons (NULL_TREE,
++ short_integer_type_node,
++ short_endlink)));
+
-+ /* long long func (short, short) */
-+ longlong_ftype_short_short
++ /* long long func (short, short) */
++ longlong_ftype_short_short
+ = build_function_type (long_long_integer_type_node,
-+ tree_cons (NULL_TREE, short_integer_type_node,
-+ short_endlink));
++ tree_cons (NULL_TREE, short_integer_type_node,
++ short_endlink));
+
-+ /* int func (int, int) */
-+ int_ftype_int_int
++ /* int func (int, int) */
++ int_ftype_int_int
+ = build_function_type (integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ int_endlink));
++ tree_cons (NULL_TREE, integer_type_node,
++ int_endlink));
+
-+ /* long long func (int, int) */
-+ longlong_ftype_int_int
++ /* long long func (int, int) */
++ longlong_ftype_int_int
+ = build_function_type (long_long_integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ int_endlink));
++ tree_cons (NULL_TREE, integer_type_node,
++ int_endlink));
+
-+ /* long long int func (long long, int, short) */
-+ longlong_ftype_longlong_int_short
++ /* long long int func (long long, int, short) */
++ longlong_ftype_longlong_int_short
+ = build_function_type (long_long_integer_type_node,
-+ tree_cons (NULL_TREE, long_long_integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ short_endlink)));
++ tree_cons (NULL_TREE, long_long_integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ short_endlink)));
+
-+ /* long long int func (int, short) */
-+ longlong_ftype_int_short
++ /* long long int func (int, short) */
++ longlong_ftype_int_short
+ = build_function_type (long_long_integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ short_endlink));
++ tree_cons (NULL_TREE, integer_type_node,
++ short_endlink));
+
-+ /* int func (int, short, short) */
-+ int_ftype_int_short_short
++ /* int func (int, short, short) */
++ int_ftype_int_short_short
+ = build_function_type (integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ tree_cons (NULL_TREE,
-+ short_integer_type_node,
-+ short_endlink)));
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE,
++ short_integer_type_node,
++ short_endlink)));
+
-+ /* int func (short, short) */
-+ int_ftype_short_short
++ /* int func (short, short) */
++ int_ftype_short_short
+ = build_function_type (integer_type_node,
-+ tree_cons (NULL_TREE, short_integer_type_node,
-+ short_endlink));
++ tree_cons (NULL_TREE, short_integer_type_node,
++ short_endlink));
+
-+ /* int func (int, short) */
-+ int_ftype_int_short
++ /* int func (int, short) */
++ int_ftype_int_short
+ = build_function_type (integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ short_endlink));
++ tree_cons (NULL_TREE, integer_type_node,
++ short_endlink));
+
-+ /* void func (int, int) */
-+ void_ftype_int_int
++ /* void func (int, int) */
++ void_ftype_int_int
+ = build_function_type (void_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ int_endlink));
++ tree_cons (NULL_TREE, integer_type_node,
++ int_endlink));
+
-+ /* void func (int, int, int) */
-+ void_ftype_int_int_int
++ /* void func (int, int, int) */
++ void_ftype_int_int_int
+ = build_function_type (void_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ int_endlink)));
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ int_endlink)));
+
-+ /* void func (int, int, long long) */
-+ void_ftype_int_int_longlong
++ /* void func (int, int, long long) */
++ void_ftype_int_int_longlong
+ = build_function_type (void_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ longlong_endlink)));
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ longlong_endlink)));
+
-+ /* void func (int, int, int, int, int) */
-+ void_ftype_int_int_int_int_int
++ /* void func (int, int, int, int, int) */
++ void_ftype_int_int_int_int_int
+ = build_function_type (void_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ tree_cons (NULL_TREE,
-+ integer_type_node,
-+ tree_cons
-+ (NULL_TREE,
-+ integer_type_node,
-+ int_endlink)))));
-+
-+ /* void func (void *, int) */
-+ void_ftype_ptr_int
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE,
++ integer_type_node,
++ tree_cons
++ (NULL_TREE,
++ integer_type_node,
++ int_endlink)))));
++
++ /* void func (void *, int) */
++ void_ftype_ptr_int
+ = build_function_type (void_type_node,
-+ tree_cons (NULL_TREE, ptr_type_node, int_endlink));
++ tree_cons (NULL_TREE, ptr_type_node, int_endlink));
+
-+ /* void func (int) */
-+ void_ftype_int = build_function_type (void_type_node, int_endlink);
++ /* void func (int) */
++ void_ftype_int = build_function_type (void_type_node, int_endlink);
+
-+ /* void func (void) */
-+ void_ftype_void = build_function_type (void_type_node, void_endlink);
++ /* void func (void) */
++ void_ftype_void = build_function_type (void_type_node, void_endlink);
+
-+ /* int func (void) */
-+ int_ftype_void = build_function_type (integer_type_node, void_endlink);
++ /* int func (void) */
++ int_ftype_void = build_function_type (integer_type_node, void_endlink);
+
-+ /* int func (void *, int) */
-+ int_ftype_ptr_int
++ /* int func (void *, int) */
++ int_ftype_ptr_int
+ = build_function_type (integer_type_node,
-+ tree_cons (NULL_TREE, ptr_type_node, int_endlink));
++ tree_cons (NULL_TREE, ptr_type_node, int_endlink));
+
-+ /* int func (int, int, int) */
-+ int_ftype_int_int_int
++ /* int func (int, int, int) */
++ int_ftype_int_int_int
+ = build_function_type (integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ int_endlink)));
-+
-+ /* Initialize avr32 builtins. */
-+ def_builtin ("__builtin_mfsr", int_ftype_int, AVR32_BUILTIN_MFSR);
-+ def_builtin ("__builtin_mtsr", void_ftype_int_int, AVR32_BUILTIN_MTSR);
-+ def_builtin ("__builtin_mfdr", int_ftype_int, AVR32_BUILTIN_MFDR);
-+ def_builtin ("__builtin_mtdr", void_ftype_int_int, AVR32_BUILTIN_MTDR);
-+ def_builtin ("__builtin_cache", void_ftype_ptr_int, AVR32_BUILTIN_CACHE);
-+ def_builtin ("__builtin_sync", void_ftype_int, AVR32_BUILTIN_SYNC);
-+ def_builtin ("__builtin_tlbr", void_ftype_void, AVR32_BUILTIN_TLBR);
-+ def_builtin ("__builtin_tlbs", void_ftype_void, AVR32_BUILTIN_TLBS);
-+ def_builtin ("__builtin_tlbw", void_ftype_void, AVR32_BUILTIN_TLBW);
-+ def_builtin ("__builtin_breakpoint", void_ftype_void,
-+ AVR32_BUILTIN_BREAKPOINT);
-+ def_builtin ("__builtin_xchg", int_ftype_ptr_int, AVR32_BUILTIN_XCHG);
-+ def_builtin ("__builtin_ldxi", int_ftype_ptr_int, AVR32_BUILTIN_LDXI);
-+ def_builtin ("__builtin_bswap_16", short_ftype_short,
-+ AVR32_BUILTIN_BSWAP16);
-+ def_builtin ("__builtin_bswap_32", int_ftype_int, AVR32_BUILTIN_BSWAP32);
-+ def_builtin ("__builtin_cop", void_ftype_int_int_int_int_int,
-+ AVR32_BUILTIN_COP);
-+ def_builtin ("__builtin_mvcr_w", int_ftype_int_int, AVR32_BUILTIN_MVCR_W);
-+ def_builtin ("__builtin_mvrc_w", void_ftype_int_int_int,
-+ AVR32_BUILTIN_MVRC_W);
-+ def_builtin ("__builtin_mvcr_d", longlong_ftype_int_int,
-+ AVR32_BUILTIN_MVCR_D);
-+ def_builtin ("__builtin_mvrc_d", void_ftype_int_int_longlong,
-+ AVR32_BUILTIN_MVRC_D);
-+ def_builtin ("__builtin_sats", int_ftype_int_int_int, AVR32_BUILTIN_SATS);
-+ def_builtin ("__builtin_satu", int_ftype_int_int_int, AVR32_BUILTIN_SATU);
-+ def_builtin ("__builtin_satrnds", int_ftype_int_int_int,
-+ AVR32_BUILTIN_SATRNDS);
-+ def_builtin ("__builtin_satrndu", int_ftype_int_int_int,
-+ AVR32_BUILTIN_SATRNDU);
-+ def_builtin ("__builtin_musfr", void_ftype_int, AVR32_BUILTIN_MUSFR);
-+ def_builtin ("__builtin_mustr", int_ftype_void, AVR32_BUILTIN_MUSTR);
-+ def_builtin ("__builtin_macsathh_w", int_ftype_int_short_short,
-+ AVR32_BUILTIN_MACSATHH_W);
-+ def_builtin ("__builtin_macwh_d", longlong_ftype_longlong_int_short,
-+ AVR32_BUILTIN_MACWH_D);
-+ def_builtin ("__builtin_machh_d", longlong_ftype_longlong_short_short,
-+ AVR32_BUILTIN_MACHH_D);
-+
-+ /* Add all builtins that are more or less simple operations on two
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ int_endlink)));
++
++ /* Initialize avr32 builtins. */
++ def_builtin ("__builtin_mfsr", int_ftype_int, AVR32_BUILTIN_MFSR);
++ def_builtin ("__builtin_mtsr", void_ftype_int_int, AVR32_BUILTIN_MTSR);
++ def_builtin ("__builtin_mfdr", int_ftype_int, AVR32_BUILTIN_MFDR);
++ def_builtin ("__builtin_mtdr", void_ftype_int_int, AVR32_BUILTIN_MTDR);
++ def_builtin ("__builtin_cache", void_ftype_ptr_int, AVR32_BUILTIN_CACHE);
++ def_builtin ("__builtin_sync", void_ftype_int, AVR32_BUILTIN_SYNC);
++ def_builtin ("__builtin_ssrf", void_ftype_int, AVR32_BUILTIN_SSRF);
++ def_builtin ("__builtin_csrf", void_ftype_int, AVR32_BUILTIN_CSRF);
++ def_builtin ("__builtin_tlbr", void_ftype_void, AVR32_BUILTIN_TLBR);
++ def_builtin ("__builtin_tlbs", void_ftype_void, AVR32_BUILTIN_TLBS);
++ def_builtin ("__builtin_tlbw", void_ftype_void, AVR32_BUILTIN_TLBW);
++ def_builtin ("__builtin_breakpoint", void_ftype_void,
++ AVR32_BUILTIN_BREAKPOINT);
++ def_builtin ("__builtin_xchg", int_ftype_ptr_int, AVR32_BUILTIN_XCHG);
++ def_builtin ("__builtin_ldxi", int_ftype_ptr_int, AVR32_BUILTIN_LDXI);
++ def_builtin ("__builtin_bswap_16", short_ftype_short,
++ AVR32_BUILTIN_BSWAP16);
++ def_builtin ("__builtin_bswap_32", int_ftype_int, AVR32_BUILTIN_BSWAP32);
++ def_builtin ("__builtin_cop", void_ftype_int_int_int_int_int,
++ AVR32_BUILTIN_COP);
++ def_builtin ("__builtin_mvcr_w", int_ftype_int_int, AVR32_BUILTIN_MVCR_W);
++ def_builtin ("__builtin_mvrc_w", void_ftype_int_int_int,
++ AVR32_BUILTIN_MVRC_W);
++ def_builtin ("__builtin_mvcr_d", longlong_ftype_int_int,
++ AVR32_BUILTIN_MVCR_D);
++ def_builtin ("__builtin_mvrc_d", void_ftype_int_int_longlong,
++ AVR32_BUILTIN_MVRC_D);
++ def_builtin ("__builtin_sats", int_ftype_int_int_int, AVR32_BUILTIN_SATS);
++ def_builtin ("__builtin_satu", int_ftype_int_int_int, AVR32_BUILTIN_SATU);
++ def_builtin ("__builtin_satrnds", int_ftype_int_int_int,
++ AVR32_BUILTIN_SATRNDS);
++ def_builtin ("__builtin_satrndu", int_ftype_int_int_int,
++ AVR32_BUILTIN_SATRNDU);
++ def_builtin ("__builtin_musfr", void_ftype_int, AVR32_BUILTIN_MUSFR);
++ def_builtin ("__builtin_mustr", int_ftype_void, AVR32_BUILTIN_MUSTR);
++ def_builtin ("__builtin_macsathh_w", int_ftype_int_short_short,
++ AVR32_BUILTIN_MACSATHH_W);
++ def_builtin ("__builtin_macwh_d", longlong_ftype_longlong_int_short,
++ AVR32_BUILTIN_MACWH_D);
++ def_builtin ("__builtin_machh_d", longlong_ftype_longlong_short_short,
++ AVR32_BUILTIN_MACHH_D);
++ def_builtin ("__builtin_mems", void_ftype_ptr_int, AVR32_BUILTIN_MEMS);
++ def_builtin ("__builtin_memt", void_ftype_ptr_int, AVR32_BUILTIN_MEMT);
++ def_builtin ("__builtin_memc", void_ftype_ptr_int, AVR32_BUILTIN_MEMC);
++
++ /* Add all builtins that are more or less simple operations on two
+ operands. */
-+ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
-+ {
-+ /* Use one of the operands; the target can have a different mode for
++ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
++ {
++ /* Use one of the operands; the target can have a different mode for
+ mask-generating compares. */
+
-+ if (d->name == 0)
-+ continue;
++ if (d->name == 0)
++ continue;
+
-+ def_mbuiltin (d->mask, d->name, *(d->ftype), d->code);
-+ }
-+ }
++ def_mbuiltin (d->mask, d->name, *(d->ftype), d->code);
++ }
++}
+
+
+/* Subroutine of avr32_expand_builtin to take care of binop insns. */
+
+static rtx
+avr32_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
-+ {
-+ rtx pat;
-+ tree arg0 = TREE_VALUE (arglist);
-+ tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
-+ rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
-+ rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
-+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
-+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
-+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
-+
-+ if (!target
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
-+
-+ /* In case the insn wants input operands in modes different from the
++{
++ rtx pat;
++ tree arg0 = TREE_VALUE (arglist);
++ tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
++ rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
++ enum machine_mode tmode = insn_data[icode].operand[0].mode;
++ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
++ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
++
++ if (!target
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++
++ /* In case the insn wants input operands in modes different from the
+ result, abort. */
-+ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
-+ {
-+ /* If op0 is already a reg we must cast it to the correct mode. */
-+ if (REG_P (op0))
-+ op0 = convert_to_mode (mode0, op0, 1);
-+ else
-+ op0 = copy_to_mode_reg (mode0, op0);
-+ }
-+ if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
-+ {
-+ /* If op1 is already a reg we must cast it to the correct mode. */
-+ if (REG_P (op1))
-+ op1 = convert_to_mode (mode1, op1, 1);
-+ else
-+ op1 = copy_to_mode_reg (mode1, op1);
-+ }
-+ pat = GEN_FCN (icode) (target, op0, op1);
-+ if (!pat)
-+ return 0;
-+ emit_insn (pat);
-+ return target;
-+ }
++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
++ {
++ /* If op0 is already a reg we must cast it to the correct mode. */
++ if (REG_P (op0))
++ op0 = convert_to_mode (mode0, op0, 1);
++ else
++ op0 = copy_to_mode_reg (mode0, op0);
++ }
++ if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
++ {
++ /* If op1 is already a reg we must cast it to the correct mode. */
++ if (REG_P (op1))
++ op1 = convert_to_mode (mode1, op1, 1);
++ else
++ op1 = copy_to_mode_reg (mode1, op1);
++ }
++ pat = GEN_FCN (icode) (target, op0, op1);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return target;
++}
+
+/* Expand an expression EXP that calls a built-in function,
+ with result going to TARGET if that's convenient
+
+rtx
+avr32_expand_builtin (tree exp,
-+ rtx target,
-+ rtx subtarget ATTRIBUTE_UNUSED,
-+ enum machine_mode mode ATTRIBUTE_UNUSED,
-+ int ignore ATTRIBUTE_UNUSED)
-+ {
-+ const struct builtin_description *d;
-+ unsigned int i;
-+ enum insn_code icode;
-+ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
-+ tree arglist = TREE_OPERAND (exp, 1);
-+ tree arg0, arg1, arg2;
-+ rtx op0, op1, op2, pat;
-+ enum machine_mode tmode, mode0, mode1;
-+ enum machine_mode arg0_mode;
-+ int fcode = DECL_FUNCTION_CODE (fndecl);
-+
-+ switch (fcode)
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ const struct builtin_description *d;
++ unsigned int i;
++ enum insn_code icode = 0;
++ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
++ tree arglist = TREE_OPERAND (exp, 1);
++ tree arg0, arg1, arg2;
++ rtx op0, op1, op2, pat;
++ enum machine_mode tmode, mode0, mode1;
++ enum machine_mode arg0_mode;
++ int fcode = DECL_FUNCTION_CODE (fndecl);
++
++ switch (fcode)
+ {
+ default:
+ break;
+ case AVR32_BUILTIN_SATU:
+ case AVR32_BUILTIN_SATRNDS:
+ case AVR32_BUILTIN_SATRNDU:
-+ {
-+ const char *fname;
-+ switch (fcode)
-+ {
-+ default:
-+ case AVR32_BUILTIN_SATS:
-+ icode = CODE_FOR_sats;
-+ fname = "sats";
-+ break;
-+ case AVR32_BUILTIN_SATU:
-+ icode = CODE_FOR_satu;
-+ fname = "satu";
-+ break;
-+ case AVR32_BUILTIN_SATRNDS:
-+ icode = CODE_FOR_satrnds;
-+ fname = "satrnds";
-+ break;
-+ case AVR32_BUILTIN_SATRNDU:
-+ icode = CODE_FOR_satrndu;
-+ fname = "satrndu";
-+ break;
-+ }
++ {
++ const char *fname;
++ switch (fcode)
++ {
++ default:
++ case AVR32_BUILTIN_SATS:
++ icode = CODE_FOR_sats;
++ fname = "sats";
++ break;
++ case AVR32_BUILTIN_SATU:
++ icode = CODE_FOR_satu;
++ fname = "satu";
++ break;
++ case AVR32_BUILTIN_SATRNDS:
++ icode = CODE_FOR_satrnds;
++ fname = "satrnds";
++ break;
++ case AVR32_BUILTIN_SATRNDU:
++ icode = CODE_FOR_satrndu;
++ fname = "satrndu";
++ break;
++ }
+
-+ arg0 = TREE_VALUE (arglist);
-+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
-+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
-+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
-+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
-+ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
++ arg0 = TREE_VALUE (arglist);
++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
++ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
++ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+
-+ tmode = insn_data[icode].operand[0].mode;
++ tmode = insn_data[icode].operand[0].mode;
+
+
-+ if (target == 0
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
+
+
-+ if (!(*insn_data[icode].operand[0].predicate) (op0, GET_MODE (op0)))
-+ {
-+ op0 = copy_to_mode_reg (insn_data[icode].operand[0].mode, op0);
-+ }
++ if (!(*insn_data[icode].operand[0].predicate) (op0, GET_MODE (op0)))
++ {
++ op0 = copy_to_mode_reg (insn_data[icode].operand[0].mode, op0);
++ }
+
-+ if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
-+ {
-+ error ("Parameter 2 to __builtin_%s should be a constant number.",
-+ fname);
-+ return NULL_RTX;
-+ }
++ if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
++ {
++ error ("Parameter 2 to __builtin_%s should be a constant number.",
++ fname);
++ return NULL_RTX;
++ }
+
-+ if (!(*insn_data[icode].operand[1].predicate) (op2, SImode))
-+ {
-+ error ("Parameter 3 to __builtin_%s should be a constant number.",
-+ fname);
-+ return NULL_RTX;
-+ }
++ if (!(*insn_data[icode].operand[1].predicate) (op2, SImode))
++ {
++ error ("Parameter 3 to __builtin_%s should be a constant number.",
++ fname);
++ return NULL_RTX;
++ }
+
-+ emit_move_insn (target, op0);
-+ pat = GEN_FCN (icode) (target, op1, op2);
-+ if (!pat)
-+ return 0;
-+ emit_insn (pat);
++ emit_move_insn (target, op0);
++ pat = GEN_FCN (icode) (target, op1, op2);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
+
-+ return target;
-+ }
++ return target;
++ }
+ case AVR32_BUILTIN_MUSTR:
+ icode = CODE_FOR_mustr;
+ tmode = insn_data[icode].operand[0].mode;
+
+ if (target == 0
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return target;
+
+ mode0 = insn_data[icode].operand[1].mode;
+
+ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
-+ {
-+ error ("Parameter 1 to __builtin_mfsr must be a constant number");
-+ }
++ {
++ error ("Parameter 1 to __builtin_mfsr must be a constant number");
++ }
+
+ if (target == 0
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return target;
+ case AVR32_BUILTIN_MTSR:
+ mode1 = insn_data[icode].operand[1].mode;
+
+ if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
-+ {
-+ error ("Parameter 1 to __builtin_mtsr must be a constant number");
-+ return gen_reg_rtx (mode0);
-+ }
++ {
++ error ("Parameter 1 to __builtin_mtsr must be a constant number");
++ return gen_reg_rtx (mode0);
++ }
+ if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
-+ op1 = copy_to_mode_reg (mode1, op1);
++ op1 = copy_to_mode_reg (mode1, op1);
+ pat = GEN_FCN (icode) (op0, op1);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return NULL_RTX;
+ case AVR32_BUILTIN_MFDR:
+ mode0 = insn_data[icode].operand[1].mode;
+
+ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
-+ {
-+ error ("Parameter 1 to __builtin_mfdr must be a constant number");
-+ }
++ {
++ error ("Parameter 1 to __builtin_mfdr must be a constant number");
++ }
+
+ if (target == 0
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return target;
+ case AVR32_BUILTIN_MTDR:
+ mode1 = insn_data[icode].operand[1].mode;
+
+ if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
-+ {
-+ error ("Parameter 1 to __builtin_mtdr must be a constant number");
-+ return gen_reg_rtx (mode0);
-+ }
++ {
++ error ("Parameter 1 to __builtin_mtdr must be a constant number");
++ return gen_reg_rtx (mode0);
++ }
+ if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
-+ op1 = copy_to_mode_reg (mode1, op1);
++ op1 = copy_to_mode_reg (mode1, op1);
+ pat = GEN_FCN (icode) (op0, op1);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return NULL_RTX;
+ case AVR32_BUILTIN_CACHE:
+ mode1 = insn_data[icode].operand[1].mode;
+
+ if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
-+ {
-+ error ("Parameter 2 to __builtin_cache must be a constant number");
-+ return gen_reg_rtx (mode1);
-+ }
++ {
++ error ("Parameter 2 to __builtin_cache must be a constant number");
++ return gen_reg_rtx (mode1);
++ }
+
+ if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
-+ op0 = copy_to_mode_reg (mode0, op0);
++ op0 = copy_to_mode_reg (mode0, op0);
+
+ pat = GEN_FCN (icode) (op0, op1);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return NULL_RTX;
+ case AVR32_BUILTIN_SYNC:
+ case AVR32_BUILTIN_MUSFR:
-+ {
-+ const char *fname;
-+ switch (fcode)
-+ {
-+ default:
-+ case AVR32_BUILTIN_SYNC:
-+ icode = CODE_FOR_sync;
-+ fname = "sync";
-+ break;
-+ case AVR32_BUILTIN_MUSFR:
-+ icode = CODE_FOR_musfr;
-+ fname = "musfr";
-+ break;
-+ }
-+
-+ arg0 = TREE_VALUE (arglist);
-+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
-+ mode0 = insn_data[icode].operand[0].mode;
++ case AVR32_BUILTIN_SSRF:
++ case AVR32_BUILTIN_CSRF:
++ {
++ const char *fname;
++ switch (fcode)
++ {
++ default:
++ case AVR32_BUILTIN_SYNC:
++ icode = CODE_FOR_sync;
++ fname = "sync";
++ break;
++ case AVR32_BUILTIN_MUSFR:
++ icode = CODE_FOR_musfr;
++ fname = "musfr";
++ break;
++ case AVR32_BUILTIN_SSRF:
++ icode = CODE_FOR_ssrf;
++ fname = "ssrf";
++ break;
++ case AVR32_BUILTIN_CSRF:
++ icode = CODE_FOR_csrf;
++ fname = "csrf";
++ break;
++ }
+
-+ if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
-+ {
-+ if (icode == CODE_FOR_musfr)
-+ op0 = copy_to_mode_reg (mode0, op0);
-+ else
-+ {
-+ error ("Parameter to __builtin_%s is illegal.", fname);
-+ return gen_reg_rtx (mode0);
-+ }
-+ }
-+ pat = GEN_FCN (icode) (op0);
-+ if (!pat)
-+ return 0;
-+ emit_insn (pat);
-+ return NULL_RTX;
-+ }
++ arg0 = TREE_VALUE (arglist);
++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ mode0 = insn_data[icode].operand[0].mode;
++
++ if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
++ {
++ if (icode == CODE_FOR_musfr)
++ op0 = copy_to_mode_reg (mode0, op0);
++ else
++ {
++ error ("Parameter to __builtin_%s is illegal.", fname);
++ return gen_reg_rtx (mode0);
++ }
++ }
++ pat = GEN_FCN (icode) (op0);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return NULL_RTX;
++ }
+ case AVR32_BUILTIN_TLBR:
+ icode = CODE_FOR_tlbr;
+ pat = GEN_FCN (icode) (NULL_RTX);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return NULL_RTX;
+ case AVR32_BUILTIN_TLBS:
+ icode = CODE_FOR_tlbs;
+ pat = GEN_FCN (icode) (NULL_RTX);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return NULL_RTX;
+ case AVR32_BUILTIN_TLBW:
+ icode = CODE_FOR_tlbw;
+ pat = GEN_FCN (icode) (NULL_RTX);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return NULL_RTX;
+ case AVR32_BUILTIN_BREAKPOINT:
+ icode = CODE_FOR_breakpoint;
+ pat = GEN_FCN (icode) (NULL_RTX);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return NULL_RTX;
+ case AVR32_BUILTIN_XCHG:
+ mode1 = insn_data[icode].operand[2].mode;
+
+ if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
-+ {
-+ op1 = copy_to_mode_reg (mode1, op1);
-+ }
++ {
++ op1 = copy_to_mode_reg (mode1, op1);
++ }
+
-+ op0 = gen_rtx_MEM (SImode, op0);
++ op0 = force_reg (GET_MODE (op0), op0);
++ op0 = gen_rtx_MEM (GET_MODE (op0), op0);
+ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
-+ {
-+ error
-+ ("Parameter 1 to __builtin_xchg must be a pointer to an integer.");
-+ }
++ {
++ error
++ ("Parameter 1 to __builtin_xchg must be a pointer to an integer.");
++ }
+
+ if (target == 0
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0, op1);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return target;
+ case AVR32_BUILTIN_LDXI:
+ mode1 = insn_data[icode].operand[2].mode;
+
+ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
-+ {
-+ op0 = copy_to_mode_reg (mode0, op0);
-+ }
++ {
++ op0 = copy_to_mode_reg (mode0, op0);
++ }
+
+ if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
-+ {
-+ op1 = copy_to_mode_reg (mode1, op1);
-+ }
++ {
++ op1 = copy_to_mode_reg (mode1, op1);
++ }
+
+ if (!(*insn_data[icode].operand[3].predicate) (op2, SImode))
-+ {
-+ error
-+ ("Parameter 3 to __builtin_ldxi must be a valid extract shift operand: (0|8|16|24)");
-+ return gen_reg_rtx (mode0);
-+ }
++ {
++ error
++ ("Parameter 3 to __builtin_ldxi must be a valid extract shift operand: (0|8|16|24)");
++ return gen_reg_rtx (mode0);
++ }
+
+ if (target == 0
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0, op1, op2);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return target;
+ case AVR32_BUILTIN_BSWAP16:
-+ {
-+ icode = CODE_FOR_bswap_16;
-+ arg0 = TREE_VALUE (arglist);
-+ arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
-+ mode0 = insn_data[icode].operand[1].mode;
-+ if (arg0_mode != mode0)
-+ arg0 = build1 (NOP_EXPR,
-+ (*lang_hooks.types.type_for_mode) (mode0, 0), arg0);
++ {
++ icode = CODE_FOR_bswap_16;
++ arg0 = TREE_VALUE (arglist);
++ arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
++ mode0 = insn_data[icode].operand[1].mode;
++ if (arg0_mode != mode0)
++ arg0 = build1 (NOP_EXPR,
++ (*lang_hooks.types.type_for_mode) (mode0, 0), arg0);
+
-+ op0 = expand_expr (arg0, NULL_RTX, HImode, 0);
-+ tmode = insn_data[icode].operand[0].mode;
++ op0 = expand_expr (arg0, NULL_RTX, HImode, 0);
++ tmode = insn_data[icode].operand[0].mode;
+
+
-+ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
-+ {
++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
++ {
++ if ( CONST_INT_P (op0) )
++ {
++ HOST_WIDE_INT val = ( ((INTVAL (op0)&0x00ff) << 8) |
++ ((INTVAL (op0)&0xff00) >> 8) );
++ /* Sign extend 16-bit value to host wide int */
++ val <<= (HOST_BITS_PER_WIDE_INT - 16);
++ val >>= (HOST_BITS_PER_WIDE_INT - 16);
++ op0 = GEN_INT(val);
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++ emit_move_insn(target, op0);
++ return target;
++ }
++ else
+ op0 = copy_to_mode_reg (mode0, op0);
-+ }
++ }
+
-+ if (target == 0
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ {
-+ target = gen_reg_rtx (tmode);
-+ }
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ {
++ target = gen_reg_rtx (tmode);
++ }
+
+
-+ pat = GEN_FCN (icode) (target, op0);
-+ if (!pat)
-+ return 0;
-+ emit_insn (pat);
++ pat = GEN_FCN (icode) (target, op0);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
+
-+ return target;
-+ }
++ return target;
++ }
+ case AVR32_BUILTIN_BSWAP32:
-+ {
-+ icode = CODE_FOR_bswap_32;
-+ arg0 = TREE_VALUE (arglist);
-+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
-+ tmode = insn_data[icode].operand[0].mode;
-+ mode0 = insn_data[icode].operand[1].mode;
-+
-+ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
-+ {
++ {
++ icode = CODE_FOR_bswap_32;
++ arg0 = TREE_VALUE (arglist);
++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ tmode = insn_data[icode].operand[0].mode;
++ mode0 = insn_data[icode].operand[1].mode;
++
++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
++ {
++ if ( CONST_INT_P (op0) )
++ {
++ HOST_WIDE_INT val = ( ((INTVAL (op0)&0x000000ff) << 24) |
++ ((INTVAL (op0)&0x0000ff00) << 8) |
++ ((INTVAL (op0)&0x00ff0000) >> 8) |
++ ((INTVAL (op0)&0xff000000) >> 24) );
++ /* Sign extend 32-bit value to host wide int */
++ val <<= (HOST_BITS_PER_WIDE_INT - 32);
++ val >>= (HOST_BITS_PER_WIDE_INT - 32);
++ op0 = GEN_INT(val);
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++ emit_move_insn(target, op0);
++ return target;
++ }
++ else
+ op0 = copy_to_mode_reg (mode0, op0);
-+ }
++ }
+
-+ if (target == 0
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
+
+
-+ pat = GEN_FCN (icode) (target, op0);
-+ if (!pat)
-+ return 0;
-+ emit_insn (pat);
++ pat = GEN_FCN (icode) (target, op0);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
+
-+ return target;
-+ }
++ return target;
++ }
+ case AVR32_BUILTIN_MVCR_W:
+ case AVR32_BUILTIN_MVCR_D:
-+ {
-+ arg0 = TREE_VALUE (arglist);
-+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
-+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
-+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
++ {
++ arg0 = TREE_VALUE (arglist);
++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+
-+ if (fcode == AVR32_BUILTIN_MVCR_W)
-+ icode = CODE_FOR_mvcrsi;
-+ else
-+ icode = CODE_FOR_mvcrdi;
++ if (fcode == AVR32_BUILTIN_MVCR_W)
++ icode = CODE_FOR_mvcrsi;
++ else
++ icode = CODE_FOR_mvcrdi;
+
-+ tmode = insn_data[icode].operand[0].mode;
++ tmode = insn_data[icode].operand[0].mode;
+
-+ if (target == 0
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
+
-+ if (!(*insn_data[icode].operand[1].predicate) (op0, SImode))
-+ {
-+ error
-+ ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
-+ error ("Number should be between 0 and 7.");
-+ return NULL_RTX;
-+ }
++ if (!(*insn_data[icode].operand[1].predicate) (op0, SImode))
++ {
++ error
++ ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
++ error ("Number should be between 0 and 7.");
++ return NULL_RTX;
++ }
+
-+ if (!(*insn_data[icode].operand[2].predicate) (op1, SImode))
-+ {
-+ error
-+ ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
-+ error ("Number should be between 0 and 15.");
-+ return NULL_RTX;
-+ }
++ if (!(*insn_data[icode].operand[2].predicate) (op1, SImode))
++ {
++ error
++ ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
++ error ("Number should be between 0 and 15.");
++ return NULL_RTX;
++ }
+
-+ pat = GEN_FCN (icode) (target, op0, op1);
-+ if (!pat)
-+ return 0;
-+ emit_insn (pat);
++ pat = GEN_FCN (icode) (target, op0, op1);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
+
-+ return target;
-+ }
++ return target;
++ }
+ case AVR32_BUILTIN_MACSATHH_W:
+ case AVR32_BUILTIN_MACWH_D:
+ case AVR32_BUILTIN_MACHH_D:
-+ {
-+ arg0 = TREE_VALUE (arglist);
-+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
-+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
-+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
-+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
-+ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
-+
-+ icode = ((fcode == AVR32_BUILTIN_MACSATHH_W) ? CODE_FOR_macsathh_w :
-+ (fcode == AVR32_BUILTIN_MACWH_D) ? CODE_FOR_macwh_d :
-+ CODE_FOR_machh_d);
++ {
++ arg0 = TREE_VALUE (arglist);
++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
++ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
++ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
++
++ icode = ((fcode == AVR32_BUILTIN_MACSATHH_W) ? CODE_FOR_macsathh_w :
++ (fcode == AVR32_BUILTIN_MACWH_D) ? CODE_FOR_macwh_d :
++ CODE_FOR_machh_d);
++
++ tmode = insn_data[icode].operand[0].mode;
++ mode0 = insn_data[icode].operand[1].mode;
++ mode1 = insn_data[icode].operand[2].mode;
++
++
++ if (!target
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++
++ if (!(*insn_data[icode].operand[0].predicate) (op0, tmode))
++ {
++ /* If op0 is already a reg we must cast it to the correct mode. */
++ if (REG_P (op0))
++ op0 = convert_to_mode (tmode, op0, 1);
++ else
++ op0 = copy_to_mode_reg (tmode, op0);
++ }
+
-+ tmode = insn_data[icode].operand[0].mode;
-+ mode0 = insn_data[icode].operand[1].mode;
-+ mode1 = insn_data[icode].operand[2].mode;
++ if (!(*insn_data[icode].operand[1].predicate) (op1, mode0))
++ {
++ /* If op1 is already a reg we must cast it to the correct mode. */
++ if (REG_P (op1))
++ op1 = convert_to_mode (mode0, op1, 1);
++ else
++ op1 = copy_to_mode_reg (mode0, op1);
++ }
+
++ if (!(*insn_data[icode].operand[2].predicate) (op2, mode1))
++ {
++ /* If op1 is already a reg we must cast it to the correct mode. */
++ if (REG_P (op2))
++ op2 = convert_to_mode (mode1, op2, 1);
++ else
++ op2 = copy_to_mode_reg (mode1, op2);
++ }
+
-+ if (!target
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
++ emit_move_insn (target, op0);
+
-+ if (!(*insn_data[icode].operand[0].predicate) (op0, tmode))
-+ {
-+ /* If op0 is already a reg we must cast it to the correct mode. */
-+ if (REG_P (op0))
-+ op0 = convert_to_mode (tmode, op0, 1);
-+ else
-+ op0 = copy_to_mode_reg (tmode, op0);
-+ }
++ pat = GEN_FCN (icode) (target, op1, op2);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return target;
++ }
++ case AVR32_BUILTIN_MVRC_W:
++ case AVR32_BUILTIN_MVRC_D:
++ {
++ arg0 = TREE_VALUE (arglist);
++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
++ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
++ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
++
++ if (fcode == AVR32_BUILTIN_MVRC_W)
++ icode = CODE_FOR_mvrcsi;
++ else
++ icode = CODE_FOR_mvrcdi;
+
-+ if (!(*insn_data[icode].operand[1].predicate) (op1, mode0))
-+ {
-+ /* If op1 is already a reg we must cast it to the correct mode. */
-+ if (REG_P (op1))
-+ op1 = convert_to_mode (mode0, op1, 1);
-+ else
-+ op1 = copy_to_mode_reg (mode0, op1);
-+ }
++ if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
++ {
++ error ("Parameter 1 is not a valid coprocessor number.");
++ error ("Number should be between 0 and 7.");
++ return NULL_RTX;
++ }
+
-+ if (!(*insn_data[icode].operand[2].predicate) (op2, mode1))
-+ {
-+ /* If op1 is already a reg we must cast it to the correct mode. */
-+ if (REG_P (op2))
-+ op2 = convert_to_mode (mode1, op2, 1);
-+ else
-+ op2 = copy_to_mode_reg (mode1, op2);
-+ }
++ if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
++ {
++ error ("Parameter 2 is not a valid coprocessor register number.");
++ error ("Number should be between 0 and 15.");
++ return NULL_RTX;
++ }
+
-+ emit_move_insn (target, op0);
++ if (GET_CODE (op2) == CONST_INT
++ || GET_CODE (op2) == CONST
++ || GET_CODE (op2) == SYMBOL_REF || GET_CODE (op2) == LABEL_REF)
++ {
++ op2 = force_const_mem (insn_data[icode].operand[2].mode, op2);
++ }
+
-+ pat = GEN_FCN (icode) (target, op1, op2);
-+ if (!pat)
-+ return 0;
-+ emit_insn (pat);
-+ return target;
-+ }
-+ case AVR32_BUILTIN_MVRC_W:
-+ case AVR32_BUILTIN_MVRC_D:
-+ {
-+ arg0 = TREE_VALUE (arglist);
-+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
-+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
-+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
-+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
-+ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
-+
-+ if (fcode == AVR32_BUILTIN_MVRC_W)
-+ icode = CODE_FOR_mvrcsi;
-+ else
-+ icode = CODE_FOR_mvrcdi;
++ if (!(*insn_data[icode].operand[2].predicate) (op2, GET_MODE (op2)))
++ op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
+
-+ if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
-+ {
-+ error ("Parameter 1 is not a valid coprocessor number.");
-+ error ("Number should be between 0 and 7.");
-+ return NULL_RTX;
-+ }
+
-+ if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
-+ {
-+ error ("Parameter 2 is not a valid coprocessor register number.");
-+ error ("Number should be between 0 and 15.");
-+ return NULL_RTX;
-+ }
++ pat = GEN_FCN (icode) (op0, op1, op2);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
+
-+ if (GET_CODE (op2) == CONST_INT
-+ || GET_CODE (op2) == CONST
-+ || GET_CODE (op2) == SYMBOL_REF || GET_CODE (op2) == LABEL_REF)
-+ {
-+ op2 = force_const_mem (insn_data[icode].operand[2].mode, op2);
-+ }
++ return NULL_RTX;
++ }
++ case AVR32_BUILTIN_COP:
++ {
++ rtx op3, op4;
++ tree arg3, arg4;
++ icode = CODE_FOR_cop;
++ arg0 = TREE_VALUE (arglist);
++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
++ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
++ arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
++ arg4 =
++ TREE_VALUE (TREE_CHAIN
++ (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist)))));
++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
++ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
++ op3 = expand_expr (arg3, NULL_RTX, VOIDmode, 0);
++ op4 = expand_expr (arg4, NULL_RTX, VOIDmode, 0);
++
++ if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
++ {
++ error
++ ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
++ error ("Number should be between 0 and 7.");
++ return NULL_RTX;
++ }
+
-+ if (!(*insn_data[icode].operand[2].predicate) (op2, GET_MODE (op2)))
-+ op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
++ if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
++ {
++ error
++ ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
++ error ("Number should be between 0 and 15.");
++ return NULL_RTX;
++ }
+
++ if (!(*insn_data[icode].operand[2].predicate) (op2, SImode))
++ {
++ error
++ ("Parameter 3 to __builtin_cop is not a valid coprocessor register number.");
++ error ("Number should be between 0 and 15.");
++ return NULL_RTX;
++ }
+
-+ pat = GEN_FCN (icode) (op0, op1, op2);
-+ if (!pat)
-+ return 0;
-+ emit_insn (pat);
++ if (!(*insn_data[icode].operand[3].predicate) (op3, SImode))
++ {
++ error
++ ("Parameter 4 to __builtin_cop is not a valid coprocessor register number.");
++ error ("Number should be between 0 and 15.");
++ return NULL_RTX;
++ }
+
-+ return NULL_RTX;
++ if (!(*insn_data[icode].operand[4].predicate) (op4, SImode))
++ {
++ error
++ ("Parameter 5 to __builtin_cop is not a valid coprocessor operation.");
++ error ("Number should be between 0 and 127.");
++ return NULL_RTX;
++ }
++
++ pat = GEN_FCN (icode) (op0, op1, op2, op3, op4);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++
++ return target;
++ }
++ case AVR32_BUILTIN_MEMS:
++ case AVR32_BUILTIN_MEMC:
++ case AVR32_BUILTIN_MEMT:
++ {
++ if (!TARGET_RMW)
++ error ("Trying to use __builtin_mem(s/c/t) when target does not support RMW insns.");
++
++ switch (fcode) {
++ case AVR32_BUILTIN_MEMS:
++ icode = CODE_FOR_iorsi3;
++ break;
++ case AVR32_BUILTIN_MEMC:
++ icode = CODE_FOR_andsi3;
++ break;
++ case AVR32_BUILTIN_MEMT:
++ icode = CODE_FOR_xorsi3;
++ break;
+ }
-+ case AVR32_BUILTIN_COP:
-+ {
-+ rtx op3, op4;
-+ tree arg3, arg4;
-+ icode = CODE_FOR_cop;
-+ arg0 = TREE_VALUE (arglist);
-+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
-+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
-+ arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
-+ arg4 =
-+ TREE_VALUE (TREE_CHAIN
-+ (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist)))));
-+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
-+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
-+ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
-+ op3 = expand_expr (arg3, NULL_RTX, VOIDmode, 0);
-+ op4 = expand_expr (arg4, NULL_RTX, VOIDmode, 0);
-+
-+ if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
-+ {
-+ error
-+ ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
-+ error ("Number should be between 0 and 7.");
-+ return NULL_RTX;
-+ }
+
-+ if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
-+ {
-+ error
-+ ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
-+ error ("Number should be between 0 and 15.");
-+ return NULL_RTX;
-+ }
++ arg0 = TREE_VALUE (arglist);
++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ if ( GET_CODE (op0) == SYMBOL_REF )
++ // This symbol must be RMW addressable
++ SYMBOL_REF_FLAGS (op0) |= (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT);
++ op0 = gen_rtx_MEM(SImode, op0);
++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
++ mode0 = insn_data[icode].operand[1].mode;
+
-+ if (!(*insn_data[icode].operand[2].predicate) (op2, SImode))
-+ {
-+ error
-+ ("Parameter 3 to __builtin_cop is not a valid coprocessor register number.");
-+ error ("Number should be between 0 and 15.");
-+ return NULL_RTX;
-+ }
+
-+ if (!(*insn_data[icode].operand[3].predicate) (op3, SImode))
-+ {
-+ error
-+ ("Parameter 4 to __builtin_cop is not a valid coprocessor register number.");
-+ error ("Number should be between 0 and 15.");
-+ return NULL_RTX;
-+ }
++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
++ {
++ error ("Parameter 1 to __builtin_mem(s/c/t) must be a Ks15<<2 address or a rmw addressable symbol.");
++ }
+
-+ if (!(*insn_data[icode].operand[4].predicate) (op4, SImode))
-+ {
-+ error
-+ ("Parameter 5 to __builtin_cop is not a valid coprocessor operation.");
-+ error ("Number should be between 0 and 127.");
-+ return NULL_RTX;
-+ }
++ if ( !CONST_INT_P (op1)
++ || INTVAL (op1) > 31
++ || INTVAL (op1) < 0 )
++ error ("Parameter 2 to __builtin_mem(s/c/t) must be a constant between 0 and 31.");
+
-+ pat = GEN_FCN (icode) (op0, op1, op2, op3, op4);
-+ if (!pat)
-+ return 0;
-+ emit_insn (pat);
++ if ( fcode == AVR32_BUILTIN_MEMC )
++ op1 = GEN_INT((~(1 << INTVAL(op1)))&0xffffffff);
++ else
++ op1 = GEN_INT((1 << INTVAL(op1))&0xffffffff);
++ pat = GEN_FCN (icode) (op0, op0, op1);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return op0;
++ }
+
-+ return target;
-+ }
+ }
+
-+ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
-+ if (d->code == fcode)
-+ return avr32_expand_binop_builtin (d->icode, arglist, target);
++ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
++ if (d->code == fcode)
++ return avr32_expand_binop_builtin (d->icode, arglist, target);
+
+
-+ /* @@@ Should really do something sensible here. */
-+ return NULL_RTX;
-+ }
++ /* @@@ Should really do something sensible here. */
++ return NULL_RTX;
++}
+
+
+/* Handle an "interrupt" or "isr" attribute;
+
+static tree
+avr32_handle_isr_attribute (tree * node, tree name, tree args,
-+ int flags, bool * no_add_attrs)
-+ {
-+ if (DECL_P (*node))
-+ {
-+ if (TREE_CODE (*node) != FUNCTION_DECL)
-+ {
-+ warning ("`%s' attribute only applies to functions",
-+ IDENTIFIER_POINTER (name));
-+ *no_add_attrs = true;
-+ }
-+ /* FIXME: the argument if any is checked for type attributes; should it
++ int flags, bool * no_add_attrs)
++{
++ if (DECL_P (*node))
++ {
++ if (TREE_CODE (*node) != FUNCTION_DECL)
++ {
++ warning (OPT_Wattributes,"`%s' attribute only applies to functions",
++ IDENTIFIER_POINTER (name));
++ *no_add_attrs = true;
++ }
++ /* FIXME: the argument if any is checked for type attributes; should it
+ be checked for decl ones? */
-+ }
-+ else
-+ {
-+ if (TREE_CODE (*node) == FUNCTION_TYPE
-+ || TREE_CODE (*node) == METHOD_TYPE)
-+ {
-+ if (avr32_isr_value (args) == AVR32_FT_UNKNOWN)
-+ {
-+ warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
-+ *no_add_attrs = true;
-+ }
-+ }
-+ else if (TREE_CODE (*node) == POINTER_TYPE
-+ && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
-+ || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
-+ && avr32_isr_value (args) != AVR32_FT_UNKNOWN)
-+ {
-+ *node = build_variant_type_copy (*node);
-+ TREE_TYPE (*node) = build_type_attribute_variant
-+ (TREE_TYPE (*node),
-+ tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
-+ *no_add_attrs = true;
-+ }
-+ else
-+ {
-+ /* Possibly pass this attribute on from the type to a decl. */
-+ if (flags & ((int) ATTR_FLAG_DECL_NEXT
-+ | (int) ATTR_FLAG_FUNCTION_NEXT
-+ | (int) ATTR_FLAG_ARRAY_NEXT))
-+ {
-+ *no_add_attrs = true;
-+ return tree_cons (name, args, NULL_TREE);
-+ }
-+ else
-+ {
-+ warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
-+ }
-+ }
-+ }
++ }
++ else
++ {
++ if (TREE_CODE (*node) == FUNCTION_TYPE
++ || TREE_CODE (*node) == METHOD_TYPE)
++ {
++ if (avr32_isr_value (args) == AVR32_FT_UNKNOWN)
++ {
++ warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
++ *no_add_attrs = true;
++ }
++ }
++ else if (TREE_CODE (*node) == POINTER_TYPE
++ && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
++ || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
++ && avr32_isr_value (args) != AVR32_FT_UNKNOWN)
++ {
++ *node = build_variant_type_copy (*node);
++ TREE_TYPE (*node) = build_type_attribute_variant
++ (TREE_TYPE (*node),
++ tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
++ *no_add_attrs = true;
++ }
++ else
++ {
++ /* Possibly pass this attribute on from the type to a decl. */
++ if (flags & ((int) ATTR_FLAG_DECL_NEXT
++ | (int) ATTR_FLAG_FUNCTION_NEXT
++ | (int) ATTR_FLAG_ARRAY_NEXT))
++ {
++ *no_add_attrs = true;
++ return tree_cons (name, args, NULL_TREE);
++ }
++ else
++ {
++ warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
++ }
++ }
++ }
+
-+ return NULL_TREE;
-+ }
++ return NULL_TREE;
++}
+
+/* Handle an attribute requiring a FUNCTION_DECL;
+ arguments as in struct attribute_spec.handler. */
+static tree
+avr32_handle_fndecl_attribute (tree * node, tree name,
-+ tree args ATTRIBUTE_UNUSED,
-+ int flags ATTRIBUTE_UNUSED,
-+ bool * no_add_attrs)
-+ {
-+ if (TREE_CODE (*node) != FUNCTION_DECL)
-+ {
-+ warning ("%qs attribute only applies to functions",
-+ IDENTIFIER_POINTER (name));
-+ *no_add_attrs = true;
-+ }
++ tree args ATTRIBUTE_UNUSED,
++ int flags ATTRIBUTE_UNUSED,
++ bool * no_add_attrs)
++{
++ if (TREE_CODE (*node) != FUNCTION_DECL)
++ {
++ warning (OPT_Wattributes,"%qs attribute only applies to functions",
++ IDENTIFIER_POINTER (name));
++ *no_add_attrs = true;
++ }
+
-+ return NULL_TREE;
-+ }
++ return NULL_TREE;
++}
+
+
+/* Handle an acall attribute;
+
+static tree
+avr32_handle_acall_attribute (tree * node, tree name,
-+ tree args ATTRIBUTE_UNUSED,
-+ int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
-+ {
-+ if (TREE_CODE (*node) == FUNCTION_TYPE || TREE_CODE (*node) == METHOD_TYPE)
-+ {
-+ warning ("`%s' attribute not yet supported...",
-+ IDENTIFIER_POINTER (name));
-+ *no_add_attrs = true;
-+ return NULL_TREE;
-+ }
++ tree args ATTRIBUTE_UNUSED,
++ int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
++{
++ if (TREE_CODE (*node) == FUNCTION_TYPE || TREE_CODE (*node) == METHOD_TYPE)
++ {
++ warning (OPT_Wattributes,"`%s' attribute not yet supported...",
++ IDENTIFIER_POINTER (name));
++ *no_add_attrs = true;
++ return NULL_TREE;
++ }
+
-+ warning ("`%s' attribute only applies to functions",
-+ IDENTIFIER_POINTER (name));
-+ *no_add_attrs = true;
-+ return NULL_TREE;
-+ }
++ warning (OPT_Wattributes,"`%s' attribute only applies to functions",
++ IDENTIFIER_POINTER (name));
++ *no_add_attrs = true;
++ return NULL_TREE;
++}
+
+
+/* Return 0 if the attributes for two types are incompatible, 1 if they
+
+static int
+avr32_comp_type_attributes (tree type1, tree type2)
-+ {
-+ int acall1, acall2, isr1, isr2, naked1, naked2;
++{
++ int acall1, acall2, isr1, isr2, naked1, naked2;
+
-+ /* Check for mismatch of non-default calling convention. */
-+ if (TREE_CODE (type1) != FUNCTION_TYPE)
-+ return 1;
++ /* Check for mismatch of non-default calling convention. */
++ if (TREE_CODE (type1) != FUNCTION_TYPE)
++ return 1;
+
-+ /* Check for mismatched call attributes. */
-+ acall1 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type1)) != NULL;
-+ acall2 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type2)) != NULL;
-+ naked1 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type1)) != NULL;
-+ naked2 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type2)) != NULL;
-+ isr1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
-+ if (!isr1)
-+ isr1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
-+
-+ isr2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
-+ if (!isr2)
-+ isr2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
-+
-+ if ((acall1 && isr2)
-+ || (acall2 && isr1) || (naked1 && isr2) || (naked2 && isr1))
-+ return 0;
++ /* Check for mismatched call attributes. */
++ acall1 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type1)) != NULL;
++ acall2 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type2)) != NULL;
++ naked1 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type1)) != NULL;
++ naked2 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type2)) != NULL;
++ isr1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
++ if (!isr1)
++ isr1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
++
++ isr2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
++ if (!isr2)
++ isr2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
++
++ if ((acall1 && isr2)
++ || (acall2 && isr1) || (naked1 && isr2) || (naked2 && isr1))
++ return 0;
+
-+ return 1;
-+ }
++ return 1;
++}
+
+
+/* Computes the type of the current function. */
+
+static unsigned long
+avr32_compute_func_type (void)
-+ {
-+ unsigned long type = AVR32_FT_UNKNOWN;
-+ tree a;
-+ tree attr;
++{
++ unsigned long type = AVR32_FT_UNKNOWN;
++ tree a;
++ tree attr;
+
-+ if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
-+ abort ();
++ if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
++ abort ();
+
-+ /* Decide if the current function is volatile. Such functions never
++ /* Decide if the current function is volatile. Such functions never
+ return, and many memory cycles can be saved by not storing register
+ values that will never be needed again. This optimization was added to
+ speed up context switching in a kernel application. */
-+ if (optimize > 0
-+ && TREE_NOTHROW (current_function_decl)
-+ && TREE_THIS_VOLATILE (current_function_decl))
-+ type |= AVR32_FT_VOLATILE;
++ if (optimize > 0
++ && TREE_NOTHROW (current_function_decl)
++ && TREE_THIS_VOLATILE (current_function_decl))
++ type |= AVR32_FT_VOLATILE;
+
-+ if (cfun->static_chain_decl != NULL)
-+ type |= AVR32_FT_NESTED;
++ if (cfun->static_chain_decl != NULL)
++ type |= AVR32_FT_NESTED;
+
-+ attr = DECL_ATTRIBUTES (current_function_decl);
++ attr = DECL_ATTRIBUTES (current_function_decl);
+
-+ a = lookup_attribute ("isr", attr);
-+ if (a == NULL_TREE)
-+ a = lookup_attribute ("interrupt", attr);
++ a = lookup_attribute ("isr", attr);
++ if (a == NULL_TREE)
++ a = lookup_attribute ("interrupt", attr);
+
-+ if (a == NULL_TREE)
-+ type |= AVR32_FT_NORMAL;
-+ else
-+ type |= avr32_isr_value (TREE_VALUE (a));
++ if (a == NULL_TREE)
++ type |= AVR32_FT_NORMAL;
++ else
++ type |= avr32_isr_value (TREE_VALUE (a));
+
+
-+ a = lookup_attribute ("acall", attr);
-+ if (a != NULL_TREE)
-+ type |= AVR32_FT_ACALL;
++ a = lookup_attribute ("acall", attr);
++ if (a != NULL_TREE)
++ type |= AVR32_FT_ACALL;
+
-+ a = lookup_attribute ("naked", attr);
-+ if (a != NULL_TREE)
-+ type |= AVR32_FT_NAKED;
++ a = lookup_attribute ("naked", attr);
++ if (a != NULL_TREE)
++ type |= AVR32_FT_NAKED;
+
-+ return type;
-+ }
++ return type;
++}
+
+/* Returns the type of the current function. */
+
+static unsigned long
+avr32_current_func_type (void)
-+ {
-+ if (AVR32_FUNC_TYPE (cfun->machine->func_type) == AVR32_FT_UNKNOWN)
-+ cfun->machine->func_type = avr32_compute_func_type ();
++{
++ if (AVR32_FUNC_TYPE (cfun->machine->func_type) == AVR32_FT_UNKNOWN)
++ cfun->machine->func_type = avr32_compute_func_type ();
+
-+ return cfun->machine->func_type;
-+ }
++ return cfun->machine->func_type;
++}
+
+/*
+ This target hook should return true if we should not pass type solely
+ in registers. The file expr.h defines a definition that is usually appropriate,
+ refer to expr.h for additional documentation.
-+ */
++*/
+bool
+avr32_must_pass_in_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type)
-+ {
-+ if (type && AGGREGATE_TYPE_P (type)
-+ /* If the alignment is less than the size then pass in the struct on
++{
++ if (type && AGGREGATE_TYPE_P (type)
++ /* If the alignment is less than the size then pass in the struct on
+ the stack. */
-+ && ((unsigned int) TYPE_ALIGN_UNIT (type) <
-+ (unsigned int) int_size_in_bytes (type))
-+ /* If we support unaligned word accesses then structs of size 4 and 8
++ && ((unsigned int) TYPE_ALIGN_UNIT (type) <
++ (unsigned int) int_size_in_bytes (type))
++ /* If we support unaligned word accesses then structs of size 4 and 8
+ can have any alignment and still be passed in registers. */
-+ && !(TARGET_UNALIGNED_WORD
-+ && (int_size_in_bytes (type) == 4
-+ || int_size_in_bytes (type) == 8))
-+ /* Double word structs need only a word alignment. */
-+ && !(int_size_in_bytes (type) == 8 && TYPE_ALIGN_UNIT (type) >= 4))
-+ return true;
++ && !(TARGET_UNALIGNED_WORD
++ && (int_size_in_bytes (type) == 4
++ || int_size_in_bytes (type) == 8))
++ /* Double word structs need only a word alignment. */
++ && !(int_size_in_bytes (type) == 8 && TYPE_ALIGN_UNIT (type) >= 4))
++ return true;
+
-+ if (type && AGGREGATE_TYPE_P (type)
-+ /* Structs of size 3,5,6,7 are always passed in registers. */
-+ && (int_size_in_bytes (type) == 3
-+ || int_size_in_bytes (type) == 5
-+ || int_size_in_bytes (type) == 6 || int_size_in_bytes (type) == 7))
-+ return true;
++ if (type && AGGREGATE_TYPE_P (type)
++ /* Structs of size 3,5,6,7 are always passed in registers. */
++ && (int_size_in_bytes (type) == 3
++ || int_size_in_bytes (type) == 5
++ || int_size_in_bytes (type) == 6 || int_size_in_bytes (type) == 7))
++ return true;
+
+
-+ return (type && TREE_ADDRESSABLE (type));
-+ }
++ return (type && TREE_ADDRESSABLE (type));
++}
+
+
+bool
+avr32_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
-+ {
-+ return true;
-+ }
++{
++ return true;
++}
+
+/*
+ This target hook should return true if an argument at the position indicated
+ If the hook returns true, a copy of that argument is made in memory and a
+ pointer to the argument is passed instead of the argument itself. The pointer
+ is passed in whatever way is appropriate for passing a pointer to that type.
-+ */
++*/
+bool
+avr32_pass_by_reference (CUMULATIVE_ARGS * cum ATTRIBUTE_UNUSED,
-+ enum machine_mode mode ATTRIBUTE_UNUSED,
-+ tree type, bool named ATTRIBUTE_UNUSED)
-+ {
-+ return (type && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST));
-+ }
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ tree type, bool named ATTRIBUTE_UNUSED)
++{
++ return (type && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST));
++}
+
+static int
+avr32_arg_partial_bytes (CUMULATIVE_ARGS * pcum ATTRIBUTE_UNUSED,
-+ enum machine_mode mode ATTRIBUTE_UNUSED,
-+ tree type ATTRIBUTE_UNUSED,
-+ bool named ATTRIBUTE_UNUSED)
-+ {
-+ return 0;
-+ }
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ tree type ATTRIBUTE_UNUSED,
++ bool named ATTRIBUTE_UNUSED)
++{
++ return 0;
++}
+
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+/*
+ Table used to convert from register number in the assembler instructions and
+ the register numbers used in gcc.
-+ */
++*/
+const int avr32_function_arg_reglist[] = {
-+ INTERNAL_REGNUM (12),
-+ INTERNAL_REGNUM (11),
-+ INTERNAL_REGNUM (10),
-+ INTERNAL_REGNUM (9),
-+ INTERNAL_REGNUM (8)
++ INTERNAL_REGNUM (12),
++ INTERNAL_REGNUM (11),
++ INTERNAL_REGNUM (10),
++ INTERNAL_REGNUM (9),
++ INTERNAL_REGNUM (8)
+};
+
+rtx avr32_compare_op0 = NULL_RTX;
+/*
+ Returns nonzero if it is allowed to store a value of mode mode in hard
+ register number regno.
-+ */
++*/
+int
+avr32_hard_regno_mode_ok (int regnr, enum machine_mode mode)
-+ {
-+ /* We allow only float modes in the fp-registers */
-+ if (regnr >= FIRST_FP_REGNUM
-+ && regnr <= LAST_FP_REGNUM && GET_MODE_CLASS (mode) != MODE_FLOAT)
-+ {
-+ return 0;
-+ }
++{
++ /* We allow only float modes in the fp-registers */
++ if (regnr >= FIRST_FP_REGNUM
++ && regnr <= LAST_FP_REGNUM && GET_MODE_CLASS (mode) != MODE_FLOAT)
++ {
++ return 0;
++ }
+
-+ switch (mode)
++ switch (mode)
+ {
+ case DImode: /* long long */
+ case DFmode: /* double */
+ case SCmode: /* __complex__ float */
+ case CSImode: /* __complex__ int */
+ if (regnr < 4)
-+ { /* long long int not supported in r12, sp, lr
++ { /* long long int not supported in r12, sp, lr
+ or pc. */
-+ return 0;
-+ }
++ return 0;
++ }
+ else
-+ {
-+ if (regnr % 2) /* long long int has to be refered in even
++ {
++ if (regnr % 2) /* long long int has to be refered in even
+ registers. */
-+ return 0;
-+ else
-+ return 1;
-+ }
++ return 0;
++ else
++ return 1;
++ }
+ case CDImode: /* __complex__ long long */
+ case DCmode: /* __complex__ double */
+ case TImode: /* 16 bytes */
+ if (regnr < 7)
-+ return 0;
++ return 0;
+ else if (regnr % 2)
-+ return 0;
++ return 0;
+ else
-+ return 1;
++ return 1;
+ default:
+ return 1;
+ }
-+ }
++}
+
+
+int
+avr32_rnd_operands (rtx add, rtx shift)
-+ {
-+ if (GET_CODE (shift) == CONST_INT &&
-+ GET_CODE (add) == CONST_INT && INTVAL (shift) > 0)
-+ {
-+ if ((1 << (INTVAL (shift) - 1)) == INTVAL (add))
-+ return TRUE;
-+ }
++{
++ if (GET_CODE (shift) == CONST_INT &&
++ GET_CODE (add) == CONST_INT && INTVAL (shift) > 0)
++ {
++ if ((1 << (INTVAL (shift) - 1)) == INTVAL (add))
++ return TRUE;
++ }
+
-+ return FALSE;
-+ }
++ return FALSE;
++}
+
+
+
+int
+avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c, const char *str)
-+ {
-+ switch (c)
++{
++ switch (c)
+ {
+ case 'K':
+ case 'I':
+ {
-+ HOST_WIDE_INT min_value = 0, max_value = 0;
-+ char size_str[3];
-+ int const_size;
-+
-+ size_str[0] = str[2];
-+ size_str[1] = str[3];
-+ size_str[2] = '\0';
-+ const_size = atoi (size_str);
-+
-+ if (toupper (str[1]) == 'U')
-+ {
-+ min_value = 0;
-+ max_value = (1 << const_size) - 1;
-+ }
-+ else if (toupper (str[1]) == 'S')
-+ {
-+ min_value = -(1 << (const_size - 1));
-+ max_value = (1 << (const_size - 1)) - 1;
-+ }
++ HOST_WIDE_INT min_value = 0, max_value = 0;
++ char size_str[3];
++ int const_size;
++
++ size_str[0] = str[2];
++ size_str[1] = str[3];
++ size_str[2] = '\0';
++ const_size = atoi (size_str);
++
++ if (toupper (str[1]) == 'U')
++ {
++ min_value = 0;
++ max_value = (1 << const_size) - 1;
++ }
++ else if (toupper (str[1]) == 'S')
++ {
++ min_value = -(1 << (const_size - 1));
++ max_value = (1 << (const_size - 1)) - 1;
++ }
+
-+ if (c == 'I')
-+ {
-+ value = -value;
-+ }
++ if (c == 'I')
++ {
++ value = -value;
++ }
+
-+ if (value >= min_value && value <= max_value)
-+ {
-+ return 1;
-+ }
-+ break;
++ if (value >= min_value && value <= max_value)
++ {
++ return 1;
++ }
++ break;
+ }
+ case 'M':
-+ return avr32_mask_upper_bits_operand (GEN_INT (value), VOIDmode);
++ return avr32_mask_upper_bits_operand (GEN_INT (value), VOIDmode);
++ case 'J':
++ return avr32_hi16_immediate_operand (GEN_INT (value), VOIDmode);
++ case 'O':
++ return one_bit_set_operand (GEN_INT (value), VOIDmode);
++ case 'N':
++ return one_bit_cleared_operand (GEN_INT (value), VOIDmode);
++ case 'L':
++ /* The lower 16-bits are set. */
++ return ((value & 0xffff) == 0xffff) ;
+ }
+
-+ return 0;
-+ }
++ return 0;
++}
+
+
+/*Compute mask of which floating-point registers needs saving upon
+ entry to this function*/
+static unsigned long
+avr32_compute_save_fp_reg_mask (void)
-+ {
-+ unsigned long func_type = avr32_current_func_type ();
-+ unsigned int save_reg_mask = 0;
-+ unsigned int reg;
-+ unsigned int max_reg = 7;
-+ int save_all_call_used_regs = FALSE;
-+
-+ /* This only applies for hardware floating-point implementation. */
-+ if (!TARGET_HARD_FLOAT)
-+ return 0;
++{
++ unsigned long func_type = avr32_current_func_type ();
++ unsigned int save_reg_mask = 0;
++ unsigned int reg;
++ unsigned int max_reg = 7;
++ int save_all_call_used_regs = FALSE;
++
++ /* This only applies for hardware floating-point implementation. */
++ if (!TARGET_HARD_FLOAT)
++ return 0;
+
-+ if (IS_INTERRUPT (func_type))
-+ {
++ if (IS_INTERRUPT (func_type))
++ {
+
-+ /* Interrupt functions must not corrupt any registers, even call
++ /* Interrupt functions must not corrupt any registers, even call
+ clobbered ones. If this is a leaf function we can just examine the
+ registers used by the RTL, but otherwise we have to assume that
+ whatever function is called might clobber anything, and so we have
+ to save all the call-clobbered registers as well. */
-+ max_reg = 13;
-+ save_all_call_used_regs = !current_function_is_leaf;
-+ }
++ max_reg = 13;
++ save_all_call_used_regs = !current_function_is_leaf;
++ }
+
-+ /* All used registers used must be saved */
-+ for (reg = 0; reg <= max_reg; reg++)
-+ if (regs_ever_live[INTERNAL_FP_REGNUM (reg)]
-+ || (save_all_call_used_regs
-+ && call_used_regs[INTERNAL_FP_REGNUM (reg)]))
-+ save_reg_mask |= (1 << reg);
++ /* All used registers used must be saved */
++ for (reg = 0; reg <= max_reg; reg++)
++ if (regs_ever_live[INTERNAL_FP_REGNUM (reg)]
++ || (save_all_call_used_regs
++ && call_used_regs[INTERNAL_FP_REGNUM (reg)]))
++ save_reg_mask |= (1 << reg);
+
-+ return save_reg_mask;
-+ }
++ return save_reg_mask;
++}
+
+/*Compute mask of registers which needs saving upon function entry */
+static unsigned long
+avr32_compute_save_reg_mask (int push)
-+ {
-+ unsigned long func_type;
-+ unsigned int save_reg_mask = 0;
-+ unsigned int reg;
-+
-+ func_type = avr32_current_func_type ();
-+
-+ if (IS_INTERRUPT (func_type))
-+ {
-+ unsigned int max_reg = 12;
++{
++ unsigned long func_type;
++ unsigned int save_reg_mask = 0;
++ unsigned int reg;
+
++ func_type = avr32_current_func_type ();
+
-+ /* Get the banking scheme for the interrupt */
-+ switch (func_type)
-+ {
-+ case AVR32_FT_ISR_FULL:
-+ max_reg = 0;
-+ break;
-+ case AVR32_FT_ISR_HALF:
-+ max_reg = 7;
-+ break;
-+ case AVR32_FT_ISR_NONE:
-+ max_reg = 12;
-+ break;
-+ }
++ if (IS_INTERRUPT (func_type))
++ {
++ unsigned int max_reg = 12;
++
++
++ /* Get the banking scheme for the interrupt */
++ switch (func_type)
++ {
++ case AVR32_FT_ISR_FULL:
++ max_reg = 0;
++ break;
++ case AVR32_FT_ISR_HALF:
++ max_reg = 7;
++ break;
++ case AVR32_FT_ISR_NONE:
++ max_reg = 12;
++ break;
++ }
+
-+ /* Interrupt functions must not corrupt any registers, even call
++ /* Interrupt functions must not corrupt any registers, even call
+ clobbered ones. If this is a leaf function we can just examine the
+ registers used by the RTL, but otherwise we have to assume that
+ whatever function is called might clobber anything, and so we have
+ to save all the call-clobbered registers as well. */
+
-+ /* Need not push the registers r8-r12 for AVR32A architectures, as this
++ /* Need not push the registers r8-r12 for AVR32A architectures, as this
+ is automatially done in hardware. We also do not have any shadow
+ registers. */
-+ if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A)
-+ {
-+ max_reg = 7;
-+ func_type = AVR32_FT_ISR_NONE;
-+ }
++ if (TARGET_UARCH_AVR32A)
++ {
++ max_reg = 7;
++ func_type = AVR32_FT_ISR_NONE;
++ }
+
-+ /* All registers which are used and is not shadowed must be saved */
-+ for (reg = 0; reg <= max_reg; reg++)
-+ if (regs_ever_live[INTERNAL_REGNUM (reg)]
-+ || (!current_function_is_leaf
-+ && call_used_regs[INTERNAL_REGNUM (reg)]))
-+ save_reg_mask |= (1 << reg);
-+
-+ /* Check LR */
-+ if ((regs_ever_live[LR_REGNUM]
-+ || !current_function_is_leaf || frame_pointer_needed)
-+ /* Only non-shadowed register models */
-+ && (func_type == AVR32_FT_ISR_NONE))
-+ save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
-+
-+ /* Make sure that the GOT register is pushed. */
-+ if (max_reg >= ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM)
-+ && current_function_uses_pic_offset_table)
-+ save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
++ /* All registers which are used and not shadowed must be saved */
++ for (reg = 0; reg <= max_reg; reg++)
++ if (regs_ever_live[INTERNAL_REGNUM (reg)]
++ || (!current_function_is_leaf
++ && call_used_regs[INTERNAL_REGNUM (reg)]))
++ save_reg_mask |= (1 << reg);
++
++ /* Check LR */
++ if ((regs_ever_live[LR_REGNUM]
++ || !current_function_is_leaf || frame_pointer_needed)
++ /* Only non-shadowed register models */
++ && (func_type == AVR32_FT_ISR_NONE))
++ save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
++
++ /* Make sure that the GOT register is pushed. */
++ if (max_reg >= ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM)
++ && current_function_uses_pic_offset_table)
++ save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
+
-+ }
-+ else
-+ {
-+ int use_pushm = optimize_size;
++ }
++ else
++ {
++ int use_pushm = optimize_size;
+
-+ /* In the normal case we only need to save those registers which are
++ /* In the normal case we only need to save those registers which are
+ call saved and which are used by this function. */
-+ for (reg = 0; reg <= 7; reg++)
-+ if (regs_ever_live[INTERNAL_REGNUM (reg)]
-+ && !call_used_regs[INTERNAL_REGNUM (reg)])
-+ save_reg_mask |= (1 << reg);
++ for (reg = 0; reg <= 7; reg++)
++ if (regs_ever_live[INTERNAL_REGNUM (reg)]
++ && !call_used_regs[INTERNAL_REGNUM (reg)])
++ save_reg_mask |= (1 << reg);
+
-+ /* Make sure that the GOT register is pushed. */
-+ if (current_function_uses_pic_offset_table)
-+ save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
++ /* Make sure that the GOT register is pushed. */
++ if (current_function_uses_pic_offset_table)
++ save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
+
+
-+ /* If we optimize for size and do not have anonymous arguments: use
++ /* If we optimize for size and do not have anonymous arguments: use
+ popm/pushm always */
-+ if (use_pushm)
-+ {
-+ if ((save_reg_mask & (1 << 0))
-+ || (save_reg_mask & (1 << 1))
-+ || (save_reg_mask & (1 << 2)) || (save_reg_mask & (1 << 3)))
-+ save_reg_mask |= 0xf;
-+
-+ if ((save_reg_mask & (1 << 4))
-+ || (save_reg_mask & (1 << 5))
-+ || (save_reg_mask & (1 << 6)) || (save_reg_mask & (1 << 7)))
-+ save_reg_mask |= 0xf0;
-+
-+ if ((save_reg_mask & (1 << 8)) || (save_reg_mask & (1 << 9)))
-+ save_reg_mask |= 0x300;
-+ }
++ if (use_pushm)
++ {
++ if ((save_reg_mask & (1 << 0))
++ || (save_reg_mask & (1 << 1))
++ || (save_reg_mask & (1 << 2)) || (save_reg_mask & (1 << 3)))
++ save_reg_mask |= 0xf;
++
++ if ((save_reg_mask & (1 << 4))
++ || (save_reg_mask & (1 << 5))
++ || (save_reg_mask & (1 << 6)) || (save_reg_mask & (1 << 7)))
++ save_reg_mask |= 0xf0;
++
++ if ((save_reg_mask & (1 << 8)) || (save_reg_mask & (1 << 9)))
++ save_reg_mask |= 0x300;
++ }
+
+
-+ /* Check LR */
-+ if ((regs_ever_live[LR_REGNUM]
-+ || !current_function_is_leaf
-+ || (optimize_size
-+ && save_reg_mask
-+ && !current_function_calls_eh_return) || frame_pointer_needed))
-+ {
-+ if (push
-+ /* Never pop LR into PC for functions which
++ /* Check LR */
++ if ((regs_ever_live[LR_REGNUM]
++ || !current_function_is_leaf
++ || (optimize_size
++ && save_reg_mask
++ && !current_function_calls_eh_return) || frame_pointer_needed))
++ {
++ if (push
++ /* Never pop LR into PC for functions which
+ calls __builtin_eh_return, since we need to
+ fix the SP after the restoring of the registers
+ and before returning. */
-+ || current_function_calls_eh_return)
-+ {
-+ /* Push/Pop LR */
-+ save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
-+ }
-+ else
-+ {
-+ /* Pop PC */
-+ save_reg_mask |= (1 << ASM_REGNUM (PC_REGNUM));
-+ }
-+ }
-+ }
-+
++ || current_function_calls_eh_return)
++ {
++ /* Push/Pop LR */
++ save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
++ }
++ else
++ {
++ /* Pop PC */
++ save_reg_mask |= (1 << ASM_REGNUM (PC_REGNUM));
++ }
++ }
++ }
+
-+ /* Save registers so the exception handler can modify them. */
-+ if (current_function_calls_eh_return)
-+ {
-+ unsigned int i;
+
-+ for (i = 0;; i++)
-+ {
-+ reg = EH_RETURN_DATA_REGNO (i);
-+ if (reg == INVALID_REGNUM)
-+ break;
-+ save_reg_mask |= 1 << ASM_REGNUM (reg);
-+ }
-+ }
++ /* Save registers so the exception handler can modify them. */
++ if (current_function_calls_eh_return)
++ {
++ unsigned int i;
++
++ for (i = 0;; i++)
++ {
++ reg = EH_RETURN_DATA_REGNO (i);
++ if (reg == INVALID_REGNUM)
++ break;
++ save_reg_mask |= 1 << ASM_REGNUM (reg);
++ }
++ }
+
-+ return save_reg_mask;
-+ }
++ return save_reg_mask;
++}
+
+/*Compute total size in bytes of all saved registers */
+static int
+avr32_get_reg_mask_size (int reg_mask)
-+ {
-+ int reg, size;
-+ size = 0;
++{
++ int reg, size;
++ size = 0;
+
-+ for (reg = 0; reg <= 15; reg++)
-+ if (reg_mask & (1 << reg))
-+ size += 4;
++ for (reg = 0; reg <= 15; reg++)
++ if (reg_mask & (1 << reg))
++ size += 4;
+
-+ return size;
-+ }
++ return size;
++}
+
+/*Get a register from one of the registers which are saved onto the stack
+ upon function entry */
+
+static int
+avr32_get_saved_reg (int save_reg_mask)
-+ {
-+ unsigned int reg;
++{
++ unsigned int reg;
+
-+ /* Find the first register which is saved in the saved_reg_mask */
-+ for (reg = 0; reg <= 15; reg++)
-+ if (save_reg_mask & (1 << reg))
-+ return reg;
++ /* Find the first register which is saved in the saved_reg_mask */
++ for (reg = 0; reg <= 15; reg++)
++ if (save_reg_mask & (1 << reg))
++ return reg;
+
-+ return -1;
-+ }
++ return -1;
++}
+
+/* Return 1 if it is possible to return using a single instruction. */
+int
+avr32_use_return_insn (int iscond)
-+ {
-+ unsigned int func_type = avr32_current_func_type ();
-+ unsigned long saved_int_regs;
-+ unsigned long saved_fp_regs;
++{
++ unsigned int func_type = avr32_current_func_type ();
++ unsigned long saved_int_regs;
++ unsigned long saved_fp_regs;
+
-+ /* Never use a return instruction before reload has run. */
-+ if (!reload_completed)
-+ return 0;
++ /* Never use a return instruction before reload has run. */
++ if (!reload_completed)
++ return 0;
+
-+ /* Must adjust the stack for vararg functions. */
-+ if (current_function_args_info.uses_anonymous_args)
-+ return 0;
++ /* Must adjust the stack for vararg functions. */
++ if (current_function_args_info.uses_anonymous_args)
++ return 0;
+
-+ /* If there a stack adjstment. */
-+ if (get_frame_size ())
-+ return 0;
++ /* If there is a stack adjustment. */
++ if (get_frame_size ())
++ return 0;
+
-+ saved_int_regs = avr32_compute_save_reg_mask (TRUE);
-+ saved_fp_regs = avr32_compute_save_fp_reg_mask ();
++ saved_int_regs = avr32_compute_save_reg_mask (TRUE);
++ saved_fp_regs = avr32_compute_save_fp_reg_mask ();
+
-+ /* Functions which have saved fp-regs on the stack can not be performed in
++ /* Functions which have saved fp-regs on the stack can not be performed in
+ one instruction */
-+ if (saved_fp_regs)
-+ return 0;
++ if (saved_fp_regs)
++ return 0;
+
-+ /* Conditional returns can not be performed in one instruction if we need
++ /* Conditional returns can not be performed in one instruction if we need
+ to restore registers from the stack */
-+ if (iscond && saved_int_regs)
-+ return 0;
++ if (iscond && saved_int_regs)
++ return 0;
+
-+ /* Conditional return can not be used for interrupt handlers. */
-+ if (iscond && IS_INTERRUPT (func_type))
-+ return 0;
++ /* Conditional return can not be used for interrupt handlers. */
++ if (iscond && IS_INTERRUPT (func_type))
++ return 0;
+
-+ /* For interrupt handlers which needs to pop registers */
-+ if (saved_int_regs && IS_INTERRUPT (func_type))
-+ return 0;
++ /* For interrupt handlers which need to pop registers */
++ if (saved_int_regs && IS_INTERRUPT (func_type))
++ return 0;
+
+
-+ /* If there are saved registers but the LR isn't saved, then we need two
++ /* If there are saved registers but the LR isn't saved, then we need two
+ instructions for the return. */
-+ if (saved_int_regs && !(saved_int_regs & (1 << ASM_REGNUM (LR_REGNUM))))
-+ return 0;
++ if (saved_int_regs && !(saved_int_regs & (1 << ASM_REGNUM (LR_REGNUM))))
++ return 0;
+
+
-+ return 1;
-+ }
++ return 1;
++}
+
+
+/*Generate some function prologue info in the assembly file*/
+
+void
+avr32_target_asm_function_prologue (FILE * f, HOST_WIDE_INT frame_size)
-+ {
-+ if (IS_NAKED (avr32_current_func_type ()))
-+ fprintf (f,
-+ "\t# Function is naked: Prologue and epilogue provided by programmer\n");
++{
++ if (IS_NAKED (avr32_current_func_type ()))
++ fprintf (f,
++ "\t# Function is naked: Prologue and epilogue provided by programmer\n");
+
-+ if (IS_INTERRUPT (avr32_current_func_type ()))
-+ {
-+ switch (avr32_current_func_type ())
-+ {
-+ case AVR32_FT_ISR_FULL:
-+ fprintf (f,
-+ "\t# Interrupt Function: Fully shadowed register file\n");
-+ break;
-+ case AVR32_FT_ISR_HALF:
-+ fprintf (f,
-+ "\t# Interrupt Function: Half shadowed register file\n");
-+ break;
-+ default:
-+ case AVR32_FT_ISR_NONE:
-+ fprintf (f, "\t# Interrupt Function: No shadowed register file\n");
-+ break;
-+ }
-+ }
++ if (IS_INTERRUPT (avr32_current_func_type ()))
++ {
++ switch (avr32_current_func_type ())
++ {
++ case AVR32_FT_ISR_FULL:
++ fprintf (f,
++ "\t# Interrupt Function: Fully shadowed register file\n");
++ break;
++ case AVR32_FT_ISR_HALF:
++ fprintf (f,
++ "\t# Interrupt Function: Half shadowed register file\n");
++ break;
++ default:
++ case AVR32_FT_ISR_NONE:
++ fprintf (f, "\t# Interrupt Function: No shadowed register file\n");
++ break;
++ }
++ }
+
+
-+ fprintf (f, "\t# args = %i, frame = %li, pretend = %i\n",
-+ current_function_args_size, frame_size,
-+ current_function_pretend_args_size);
++ fprintf (f, "\t# args = %i, frame = %li, pretend = %i\n",
++ current_function_args_size, frame_size,
++ current_function_pretend_args_size);
+
-+ fprintf (f, "\t# frame_needed = %i, leaf_function = %i\n",
-+ frame_pointer_needed, current_function_is_leaf);
++ fprintf (f, "\t# frame_needed = %i, leaf_function = %i\n",
++ frame_pointer_needed, current_function_is_leaf);
+
-+ fprintf (f, "\t# uses_anonymous_args = %i\n",
-+ current_function_args_info.uses_anonymous_args);
-+ if (current_function_calls_eh_return)
-+ fprintf (f, "\t# Calls __builtin_eh_return.\n");
++ fprintf (f, "\t# uses_anonymous_args = %i\n",
++ current_function_args_info.uses_anonymous_args);
++ if (current_function_calls_eh_return)
++ fprintf (f, "\t# Calls __builtin_eh_return.\n");
+
-+ }
++}
+
+
+/* Generate and emit an insn that we will recognize as a pushm or stm.
+
+static rtx
+emit_multi_reg_push (int reglist, int usePUSHM)
-+ {
-+ rtx insn;
-+ rtx dwarf;
-+ rtx tmp;
-+ rtx reg;
-+ int i;
-+ int nr_regs;
-+ int index = 0;
++{
++ rtx insn;
++ rtx dwarf;
++ rtx tmp;
++ rtx reg;
++ int i;
++ int nr_regs;
++ int index = 0;
+
-+ if (usePUSHM)
-+ {
-+ insn = emit_insn (gen_pushm (gen_rtx_CONST_INT (SImode, reglist)));
-+ reglist = avr32_convert_to_reglist16 (reglist);
-+ }
-+ else
-+ {
-+ insn = emit_insn (gen_stm (stack_pointer_rtx,
-+ gen_rtx_CONST_INT (SImode, reglist),
-+ gen_rtx_CONST_INT (SImode, 1)));
-+ }
++ if (usePUSHM)
++ {
++ insn = emit_insn (gen_pushm (gen_rtx_CONST_INT (SImode, reglist)));
++ reglist = avr32_convert_to_reglist16 (reglist);
++ }
++ else
++ {
++ insn = emit_insn (gen_stm (stack_pointer_rtx,
++ gen_rtx_CONST_INT (SImode, reglist),
++ gen_rtx_CONST_INT (SImode, 1)));
++ }
+
-+ nr_regs = avr32_get_reg_mask_size (reglist) / 4;
-+ dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
++ nr_regs = avr32_get_reg_mask_size (reglist) / 4;
++ dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
+
-+ for (i = 15; i >= 0; i--)
-+ {
-+ if (reglist & (1 << i))
-+ {
-+ reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (i));
-+ tmp = gen_rtx_SET (VOIDmode,
-+ gen_rtx_MEM (SImode,
-+ plus_constant (stack_pointer_rtx,
-+ 4 * index)), reg);
-+ RTX_FRAME_RELATED_P (tmp) = 1;
-+ XVECEXP (dwarf, 0, 1 + index++) = tmp;
-+ }
-+ }
++ for (i = 15; i >= 0; i--)
++ {
++ if (reglist & (1 << i))
++ {
++ reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (i));
++ tmp = gen_rtx_SET (VOIDmode,
++ gen_rtx_MEM (SImode,
++ plus_constant (stack_pointer_rtx,
++ 4 * index)), reg);
++ RTX_FRAME_RELATED_P (tmp) = 1;
++ XVECEXP (dwarf, 0, 1 + index++) = tmp;
++ }
++ }
+
-+ tmp = gen_rtx_SET (SImode,
-+ stack_pointer_rtx,
-+ gen_rtx_PLUS (SImode,
-+ stack_pointer_rtx,
-+ GEN_INT (-4 * nr_regs)));
-+ RTX_FRAME_RELATED_P (tmp) = 1;
-+ XVECEXP (dwarf, 0, 0) = tmp;
-+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
-+ REG_NOTES (insn));
-+ return insn;
-+ }
++ tmp = gen_rtx_SET (SImode,
++ stack_pointer_rtx,
++ gen_rtx_PLUS (SImode,
++ stack_pointer_rtx,
++ GEN_INT (-4 * nr_regs)));
++ RTX_FRAME_RELATED_P (tmp) = 1;
++ XVECEXP (dwarf, 0, 0) = tmp;
++ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
++ REG_NOTES (insn));
++ return insn;
++}
+
+
+static rtx
+emit_multi_fp_reg_push (int reglist)
-+ {
-+ rtx insn;
-+ rtx dwarf;
-+ rtx tmp;
-+ rtx reg;
-+ int i;
-+ int nr_regs;
-+ int index = 0;
++{
++ rtx insn;
++ rtx dwarf;
++ rtx tmp;
++ rtx reg;
++ int i;
++ int nr_regs;
++ int index = 0;
+
-+ insn = emit_insn (gen_stm_fp (stack_pointer_rtx,
-+ gen_rtx_CONST_INT (SImode, reglist),
-+ gen_rtx_CONST_INT (SImode, 1)));
++ insn = emit_insn (gen_stm_fp (stack_pointer_rtx,
++ gen_rtx_CONST_INT (SImode, reglist),
++ gen_rtx_CONST_INT (SImode, 1)));
+
-+ nr_regs = avr32_get_reg_mask_size (reglist) / 4;
-+ dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
++ nr_regs = avr32_get_reg_mask_size (reglist) / 4;
++ dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
+
-+ for (i = 15; i >= 0; i--)
-+ {
-+ if (reglist & (1 << i))
-+ {
-+ reg = gen_rtx_REG (SImode, INTERNAL_FP_REGNUM (i));
-+ tmp = gen_rtx_SET (VOIDmode,
-+ gen_rtx_MEM (SImode,
-+ plus_constant (stack_pointer_rtx,
-+ 4 * index)), reg);
-+ RTX_FRAME_RELATED_P (tmp) = 1;
-+ XVECEXP (dwarf, 0, 1 + index++) = tmp;
-+ }
-+ }
++ for (i = 15; i >= 0; i--)
++ {
++ if (reglist & (1 << i))
++ {
++ reg = gen_rtx_REG (SImode, INTERNAL_FP_REGNUM (i));
++ tmp = gen_rtx_SET (VOIDmode,
++ gen_rtx_MEM (SImode,
++ plus_constant (stack_pointer_rtx,
++ 4 * index)), reg);
++ RTX_FRAME_RELATED_P (tmp) = 1;
++ XVECEXP (dwarf, 0, 1 + index++) = tmp;
++ }
++ }
+
-+ tmp = gen_rtx_SET (SImode,
-+ stack_pointer_rtx,
-+ gen_rtx_PLUS (SImode,
-+ stack_pointer_rtx,
-+ GEN_INT (-4 * nr_regs)));
-+ RTX_FRAME_RELATED_P (tmp) = 1;
-+ XVECEXP (dwarf, 0, 0) = tmp;
-+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
-+ REG_NOTES (insn));
-+ return insn;
-+ }
++ tmp = gen_rtx_SET (SImode,
++ stack_pointer_rtx,
++ gen_rtx_PLUS (SImode,
++ stack_pointer_rtx,
++ GEN_INT (-4 * nr_regs)));
++ RTX_FRAME_RELATED_P (tmp) = 1;
++ XVECEXP (dwarf, 0, 0) = tmp;
++ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
++ REG_NOTES (insn));
++ return insn;
++}
+
+rtx
+avr32_gen_load_multiple (rtx * regs, int count, rtx from,
-+ int write_back, int in_struct_p, int scalar_p)
-+ {
++ int write_back, int in_struct_p, int scalar_p)
++{
+
-+ rtx result;
-+ int i = 0, j;
++ rtx result;
++ int i = 0, j;
+
-+ result =
-+ gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + (write_back ? 1 : 0)));
++ result =
++ gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + (write_back ? 1 : 0)));
+
-+ if (write_back)
-+ {
-+ XVECEXP (result, 0, 0)
-+ = gen_rtx_SET (GET_MODE (from), from,
-+ plus_constant (from, count * 4));
-+ i = 1;
-+ count++;
-+ }
++ if (write_back)
++ {
++ XVECEXP (result, 0, 0)
++ = gen_rtx_SET (GET_MODE (from), from,
++ plus_constant (from, count * 4));
++ i = 1;
++ count++;
++ }
+
+
-+ for (j = 0; i < count; i++, j++)
-+ {
-+ rtx unspec;
-+ rtx mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4));
-+ MEM_IN_STRUCT_P (mem) = in_struct_p;
-+ MEM_SCALAR_P (mem) = scalar_p;
-+ unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, mem), UNSPEC_LDM);
-+ XVECEXP (result, 0, i) = gen_rtx_SET (VOIDmode, regs[j], unspec);
-+ }
++ for (j = 0; i < count; i++, j++)
++ {
++ rtx unspec;
++ rtx mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4));
++ MEM_IN_STRUCT_P (mem) = in_struct_p;
++ MEM_SCALAR_P (mem) = scalar_p;
++ unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, mem), UNSPEC_LDM);
++ XVECEXP (result, 0, i) = gen_rtx_SET (VOIDmode, regs[j], unspec);
++ }
+
-+ return result;
-+ }
++ return result;
++}
+
+
+rtx
+avr32_gen_store_multiple (rtx * regs, int count, rtx to,
-+ int in_struct_p, int scalar_p)
-+ {
-+ rtx result;
-+ int i = 0, j;
++ int in_struct_p, int scalar_p)
++{
++ rtx result;
++ int i = 0, j;
+
-+ result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
++ result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
+
-+ for (j = 0; i < count; i++, j++)
-+ {
-+ rtx mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4));
-+ MEM_IN_STRUCT_P (mem) = in_struct_p;
-+ MEM_SCALAR_P (mem) = scalar_p;
-+ XVECEXP (result, 0, i)
-+ = gen_rtx_SET (VOIDmode, mem,
-+ gen_rtx_UNSPEC (VOIDmode,
-+ gen_rtvec (1, regs[j]),
-+ UNSPEC_STORE_MULTIPLE));
-+ }
++ for (j = 0; i < count; i++, j++)
++ {
++ rtx mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4));
++ MEM_IN_STRUCT_P (mem) = in_struct_p;
++ MEM_SCALAR_P (mem) = scalar_p;
++ XVECEXP (result, 0, i)
++ = gen_rtx_SET (VOIDmode, mem,
++ gen_rtx_UNSPEC (VOIDmode,
++ gen_rtvec (1, regs[j]),
++ UNSPEC_STORE_MULTIPLE));
++ }
+
-+ return result;
-+ }
++ return result;
++}
+
+
+/* Move a block of memory if it is word aligned or we support unaligned
+
+int
+avr32_gen_movmemsi (rtx * operands)
-+ {
-+ HOST_WIDE_INT bytes_to_go;
-+ rtx src, dst;
-+ rtx st_src, st_dst;
-+ int ptr_offset = 0;
-+ int block_size;
-+ int dst_in_struct_p, src_in_struct_p;
-+ int dst_scalar_p, src_scalar_p;
-+ int unaligned;
-+
-+ if (GET_CODE (operands[2]) != CONST_INT
-+ || GET_CODE (operands[3]) != CONST_INT
-+ || INTVAL (operands[2]) > 64
-+ || ((INTVAL (operands[3]) & 3) && !TARGET_UNALIGNED_WORD))
-+ return 0;
++{
++ HOST_WIDE_INT bytes_to_go;
++ rtx src, dst;
++ rtx st_src, st_dst;
++ int src_offset = 0, dst_offset = 0;
++ int block_size;
++ int dst_in_struct_p, src_in_struct_p;
++ int dst_scalar_p, src_scalar_p;
++ int unaligned;
++
++ if (GET_CODE (operands[2]) != CONST_INT
++ || GET_CODE (operands[3]) != CONST_INT
++ || INTVAL (operands[2]) > 64
++ || ((INTVAL (operands[3]) & 3) && !TARGET_UNALIGNED_WORD))
++ return 0;
+
-+ unaligned = (INTVAL (operands[3]) & 3) != 0;
++ unaligned = (INTVAL (operands[3]) & 3) != 0;
+
-+ block_size = 4;
++ block_size = 4;
+
-+ st_dst = XEXP (operands[0], 0);
-+ st_src = XEXP (operands[1], 0);
++ st_dst = XEXP (operands[0], 0);
++ st_src = XEXP (operands[1], 0);
+
-+ dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
-+ dst_scalar_p = MEM_SCALAR_P (operands[0]);
-+ src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
-+ src_scalar_p = MEM_SCALAR_P (operands[1]);
++ dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
++ dst_scalar_p = MEM_SCALAR_P (operands[0]);
++ src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
++ src_scalar_p = MEM_SCALAR_P (operands[1]);
+
-+ dst = copy_to_mode_reg (SImode, st_dst);
-+ src = copy_to_mode_reg (SImode, st_src);
++ dst = copy_to_mode_reg (SImode, st_dst);
++ src = copy_to_mode_reg (SImode, st_src);
+
-+ bytes_to_go = INTVAL (operands[2]);
++ bytes_to_go = INTVAL (operands[2]);
+
-+ while (bytes_to_go)
-+ {
-+ enum machine_mode move_mode;
-+ /* (Seems to be a problem with reloads for the movti pattern so this is
-+ disabled until that problem is resolved)
++ while (bytes_to_go)
++ {
++ enum machine_mode move_mode;
++ /* (Seems to be a problem with reloads for the movti pattern so this is
++ disabled until that problem is resolved)
+ UPDATE: Problem seems to be solved now.... */
-+ if (bytes_to_go >= GET_MODE_SIZE (TImode) && !unaligned
-+ /* Do not emit ldm/stm for UC3 as ld.d/st.d is more optimal. */
-+ && avr32_arch->arch_type != ARCH_TYPE_AVR32_UC)
-+ move_mode = TImode;
-+ else if ((bytes_to_go >= GET_MODE_SIZE (DImode)) && !unaligned)
-+ move_mode = DImode;
-+ else if (bytes_to_go >= GET_MODE_SIZE (SImode))
-+ move_mode = SImode;
-+ else
-+ move_mode = QImode;
++ if (bytes_to_go >= GET_MODE_SIZE (TImode) && !unaligned
++ /* Do not emit ldm/stm for UC3 as ld.d/st.d is more optimal. */
++ && !TARGET_ARCH_UC)
++ move_mode = TImode;
++ else if ((bytes_to_go >= GET_MODE_SIZE (DImode)) && !unaligned)
++ move_mode = DImode;
++ else if (bytes_to_go >= GET_MODE_SIZE (SImode))
++ move_mode = SImode;
++ else
++ move_mode = QImode;
+
++ {
++ rtx src_mem;
++ rtx dst_mem = gen_rtx_MEM (move_mode,
++ gen_rtx_PLUS (SImode, dst,
++ GEN_INT (dst_offset)));
++ dst_offset += GET_MODE_SIZE (move_mode);
++ if ( 0 /* This causes an error in GCC. Think there is
++ something wrong in the gcse pass which causes REQ_EQUIV notes
++ to be wrong so disabling it for now. */
++ && move_mode == TImode
++ && INTVAL (operands[2]) > GET_MODE_SIZE (TImode) )
+ {
-+ rtx dst_mem = gen_rtx_MEM (move_mode,
-+ gen_rtx_PLUS (SImode, dst,
-+ GEN_INT (ptr_offset)));
-+ rtx src_mem = gen_rtx_MEM (move_mode,
-+ gen_rtx_PLUS (SImode, src,
-+ GEN_INT (ptr_offset)));
-+ ptr_offset += GET_MODE_SIZE (move_mode);
-+ bytes_to_go -= GET_MODE_SIZE (move_mode);
-+
-+ MEM_IN_STRUCT_P (dst_mem) = dst_in_struct_p;
-+ MEM_SCALAR_P (dst_mem) = dst_scalar_p;
-+
-+ MEM_IN_STRUCT_P (src_mem) = src_in_struct_p;
-+ MEM_SCALAR_P (src_mem) = src_scalar_p;
-+ emit_move_insn (dst_mem, src_mem);
-+
++ src_mem = gen_rtx_MEM (move_mode,
++ gen_rtx_POST_INC (SImode, src));
++ }
++ else
++ {
++ src_mem = gen_rtx_MEM (move_mode,
++ gen_rtx_PLUS (SImode, src,
++ GEN_INT (src_offset)));
++ src_offset += GET_MODE_SIZE (move_mode);
+ }
++
++ bytes_to_go -= GET_MODE_SIZE (move_mode);
++
++ MEM_IN_STRUCT_P (dst_mem) = dst_in_struct_p;
++ MEM_SCALAR_P (dst_mem) = dst_scalar_p;
++
++ MEM_IN_STRUCT_P (src_mem) = src_in_struct_p;
++ MEM_SCALAR_P (src_mem) = src_scalar_p;
++ emit_move_insn (dst_mem, src_mem);
++
+ }
++ }
+
-+ return 1;
-+ }
++ return 1;
++}
+
+
+
+/*Expand the prologue instruction*/
+void
+avr32_expand_prologue (void)
-+ {
-+ rtx insn, dwarf;
-+ unsigned long saved_reg_mask, saved_fp_reg_mask;
-+ int reglist8 = 0;
++{
++ rtx insn, dwarf;
++ unsigned long saved_reg_mask, saved_fp_reg_mask;
++ int reglist8 = 0;
+
-+ /* Naked functions does not have a prologue */
-+ if (IS_NAKED (avr32_current_func_type ()))
-+ return;
++ /* Naked functions do not have a prologue */
++ if (IS_NAKED (avr32_current_func_type ()))
++ return;
+
-+ saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
++ saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
+
-+ if (saved_reg_mask)
-+ {
-+ /* Must push used registers */
-+
-+ /* Should we use POPM or LDM? */
-+ int usePUSHM = TRUE;
-+ reglist8 = 0;
-+ if (((saved_reg_mask & (1 << 0)) ||
-+ (saved_reg_mask & (1 << 1)) ||
-+ (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
-+ {
-+ /* One of R0-R3 should at least be pushed */
-+ if (((saved_reg_mask & (1 << 0)) &&
-+ (saved_reg_mask & (1 << 1)) &&
-+ (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
-+ {
-+ /* All should be pushed */
-+ reglist8 |= 0x01;
-+ }
-+ else
-+ {
-+ usePUSHM = FALSE;
-+ }
-+ }
++ if (saved_reg_mask)
++ {
++ /* Must push used registers */
++
++ /* Should we use POPM or LDM? */
++ int usePUSHM = TRUE;
++ reglist8 = 0;
++ if (((saved_reg_mask & (1 << 0)) ||
++ (saved_reg_mask & (1 << 1)) ||
++ (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
++ {
++ /* One of R0-R3 should at least be pushed */
++ if (((saved_reg_mask & (1 << 0)) &&
++ (saved_reg_mask & (1 << 1)) &&
++ (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
++ {
++ /* All should be pushed */
++ reglist8 |= 0x01;
++ }
++ else
++ {
++ usePUSHM = FALSE;
++ }
++ }
+
-+ if (((saved_reg_mask & (1 << 4)) ||
-+ (saved_reg_mask & (1 << 5)) ||
-+ (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
-+ {
-+ /* One of R4-R7 should at least be pushed */
-+ if (((saved_reg_mask & (1 << 4)) &&
-+ (saved_reg_mask & (1 << 5)) &&
-+ (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
-+ {
-+ if (usePUSHM)
-+ /* All should be pushed */
-+ reglist8 |= 0x02;
-+ }
-+ else
-+ {
-+ usePUSHM = FALSE;
-+ }
-+ }
++ if (((saved_reg_mask & (1 << 4)) ||
++ (saved_reg_mask & (1 << 5)) ||
++ (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
++ {
++ /* One of R4-R7 should at least be pushed */
++ if (((saved_reg_mask & (1 << 4)) &&
++ (saved_reg_mask & (1 << 5)) &&
++ (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
++ {
++ if (usePUSHM)
++ /* All should be pushed */
++ reglist8 |= 0x02;
++ }
++ else
++ {
++ usePUSHM = FALSE;
++ }
++ }
+
-+ if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
-+ {
-+ /* One of R8-R9 should at least be pushed */
-+ if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
-+ {
-+ if (usePUSHM)
-+ /* All should be pushed */
-+ reglist8 |= 0x04;
-+ }
-+ else
-+ {
-+ usePUSHM = FALSE;
-+ }
-+ }
++ if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
++ {
++ /* One of R8-R9 should at least be pushed */
++ if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
++ {
++ if (usePUSHM)
++ /* All should be pushed */
++ reglist8 |= 0x04;
++ }
++ else
++ {
++ usePUSHM = FALSE;
++ }
++ }
+
-+ if (saved_reg_mask & (1 << 10))
-+ reglist8 |= 0x08;
++ if (saved_reg_mask & (1 << 10))
++ reglist8 |= 0x08;
+
-+ if (saved_reg_mask & (1 << 11))
-+ reglist8 |= 0x10;
++ if (saved_reg_mask & (1 << 11))
++ reglist8 |= 0x10;
+
-+ if (saved_reg_mask & (1 << 12))
-+ reglist8 |= 0x20;
++ if (saved_reg_mask & (1 << 12))
++ reglist8 |= 0x20;
+
-+ if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
-+ {
-+ /* Push LR */
-+ reglist8 |= 0x40;
-+ }
++ if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
++ {
++ /* Push LR */
++ reglist8 |= 0x40;
++ }
+
-+ if (usePUSHM)
-+ {
-+ insn = emit_multi_reg_push (reglist8, TRUE);
-+ }
-+ else
-+ {
-+ insn = emit_multi_reg_push (saved_reg_mask, FALSE);
-+ }
-+ RTX_FRAME_RELATED_P (insn) = 1;
++ if (usePUSHM)
++ {
++ insn = emit_multi_reg_push (reglist8, TRUE);
++ }
++ else
++ {
++ insn = emit_multi_reg_push (saved_reg_mask, FALSE);
++ }
++ RTX_FRAME_RELATED_P (insn) = 1;
+
-+ /* Prevent this instruction from being scheduled after any other
++ /* Prevent this instruction from being scheduled after any other
+ instructions. */
-+ emit_insn (gen_blockage ());
-+ }
++ emit_insn (gen_blockage ());
++ }
+
-+ saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
-+ if (saved_fp_reg_mask)
-+ {
-+ insn = emit_multi_fp_reg_push (saved_fp_reg_mask);
-+ RTX_FRAME_RELATED_P (insn) = 1;
++ saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
++ if (saved_fp_reg_mask)
++ {
++ insn = emit_multi_fp_reg_push (saved_fp_reg_mask);
++ RTX_FRAME_RELATED_P (insn) = 1;
+
-+ /* Prevent this instruction from being scheduled after any other
++ /* Prevent this instruction from being scheduled after any other
+ instructions. */
-+ emit_insn (gen_blockage ());
-+ }
++ emit_insn (gen_blockage ());
++ }
+
-+ /* Set frame pointer */
-+ if (frame_pointer_needed)
-+ {
-+ insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
-+ RTX_FRAME_RELATED_P (insn) = 1;
-+ }
++ /* Set frame pointer */
++ if (frame_pointer_needed)
++ {
++ insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
++ RTX_FRAME_RELATED_P (insn) = 1;
++ }
+
-+ if (get_frame_size () > 0)
-+ {
-+ if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks21"))
-+ {
-+ insn = emit_insn (gen_rtx_SET (SImode,
-+ stack_pointer_rtx,
-+ gen_rtx_PLUS (SImode,
-+ stack_pointer_rtx,
-+ gen_rtx_CONST_INT
-+ (SImode,
-+ -get_frame_size
-+ ()))));
-+ RTX_FRAME_RELATED_P (insn) = 1;
-+ }
-+ else
-+ {
-+ /* Immediate is larger than k21 We must either check if we can use
++ if (get_frame_size () > 0)
++ {
++ if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks21"))
++ {
++ insn = emit_insn (gen_rtx_SET (SImode,
++ stack_pointer_rtx,
++ gen_rtx_PLUS (SImode,
++ stack_pointer_rtx,
++ gen_rtx_CONST_INT
++ (SImode,
++ -get_frame_size
++ ()))));
++ RTX_FRAME_RELATED_P (insn) = 1;
++ }
++ else
++ {
++ /* Immediate is larger than k21. We must either check if we can use
+ one of the pushed reegisters as temporary storage or we must
+ make us a temp register by pushing a register to the stack. */
-+ rtx temp_reg, const_pool_entry, insn;
-+ if (saved_reg_mask)
-+ {
-+ temp_reg =
-+ gen_rtx_REG (SImode,
-+ INTERNAL_REGNUM (avr32_get_saved_reg
-+ (saved_reg_mask)));
-+ }
-+ else
-+ {
-+ temp_reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (7));
-+ emit_move_insn (gen_rtx_MEM
-+ (SImode,
-+ gen_rtx_PRE_DEC (SImode, stack_pointer_rtx)),
-+ temp_reg);
-+ }
++ rtx temp_reg, const_pool_entry, insn;
++ if (saved_reg_mask)
++ {
++ temp_reg =
++ gen_rtx_REG (SImode,
++ INTERNAL_REGNUM (avr32_get_saved_reg
++ (saved_reg_mask)));
++ }
++ else
++ {
++ temp_reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (7));
++ emit_move_insn (gen_rtx_MEM
++ (SImode,
++ gen_rtx_PRE_DEC (SImode, stack_pointer_rtx)),
++ temp_reg);
++ }
+
-+ const_pool_entry =
-+ force_const_mem (SImode,
-+ gen_rtx_CONST_INT (SImode, get_frame_size ()));
-+ emit_move_insn (temp_reg, const_pool_entry);
-+
-+ insn = emit_insn (gen_rtx_SET (SImode,
-+ stack_pointer_rtx,
-+ gen_rtx_MINUS (SImode,
-+ stack_pointer_rtx,
-+ temp_reg)));
-+
-+ dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
-+ gen_rtx_PLUS (SImode, stack_pointer_rtx,
-+ GEN_INT (-get_frame_size ())));
-+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
-+ dwarf, REG_NOTES (insn));
-+ RTX_FRAME_RELATED_P (insn) = 1;
-+
-+ if (!saved_reg_mask)
-+ {
-+ insn =
-+ emit_move_insn (temp_reg,
-+ gen_rtx_MEM (SImode,
-+ gen_rtx_POST_INC (SImode,
-+ gen_rtx_REG
-+ (SImode,
-+ 13))));
-+ }
++ const_pool_entry =
++ force_const_mem (SImode,
++ gen_rtx_CONST_INT (SImode, get_frame_size ()));
++ emit_move_insn (temp_reg, const_pool_entry);
++
++ insn = emit_insn (gen_rtx_SET (SImode,
++ stack_pointer_rtx,
++ gen_rtx_MINUS (SImode,
++ stack_pointer_rtx,
++ temp_reg)));
++
++ dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
++ gen_rtx_PLUS (SImode, stack_pointer_rtx,
++ GEN_INT (-get_frame_size ())));
++ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
++ dwarf, REG_NOTES (insn));
++ RTX_FRAME_RELATED_P (insn) = 1;
++
++ if (!saved_reg_mask)
++ {
++ insn =
++ emit_move_insn (temp_reg,
++ gen_rtx_MEM (SImode,
++ gen_rtx_POST_INC (SImode,
++ gen_rtx_REG
++ (SImode,
++ 13))));
++ }
+
-+ /* Mark the temp register as dead */
-+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, temp_reg,
-+ REG_NOTES (insn));
++ /* Mark the temp register as dead */
++ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, temp_reg,
++ REG_NOTES (insn));
+
+
-+ }
++ }
+
-+ /* Prevent the the stack adjustment to be scheduled after any
++ /* Prevent the stack adjustment from being scheduled after any
+ instructions using the frame pointer. */
-+ emit_insn (gen_blockage ());
-+ }
++ emit_insn (gen_blockage ());
++ }
+
-+ /* Load GOT */
-+ if (flag_pic)
-+ {
-+ avr32_load_pic_register ();
++ /* Load GOT */
++ if (flag_pic)
++ {
++ avr32_load_pic_register ();
+
-+ /* gcc does not know that load or call instructions might use the pic
++ /* gcc does not know that load or call instructions might use the pic
+ register so it might schedule these instructions before the loading
+ of the pic register. To avoid this emit a barrier for now. TODO!
+ Find out a better way to let gcc know which instructions might use
+ the pic register. */
-+ emit_insn (gen_blockage ());
-+ }
-+ return;
-+ }
++ emit_insn (gen_blockage ());
++ }
++ return;
++}
+
+void
+avr32_set_return_address (rtx source, rtx scratch)
-+ {
-+ rtx addr;
-+ unsigned long saved_regs;
++{
++ rtx addr;
++ unsigned long saved_regs;
+
-+ saved_regs = avr32_compute_save_reg_mask (TRUE);
++ saved_regs = avr32_compute_save_reg_mask (TRUE);
+
-+ if (!(saved_regs & (1 << ASM_REGNUM (LR_REGNUM))))
-+ emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
-+ else
-+ {
-+ if (frame_pointer_needed)
-+ addr = gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM);
-+ else
-+ if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks16"))
-+ {
-+ addr = plus_constant (stack_pointer_rtx, get_frame_size ());
-+ }
-+ else
-+ {
-+ emit_insn (gen_movsi (scratch, GEN_INT (get_frame_size ())));
-+ addr = scratch;
-+ }
-+ emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
-+ }
-+ }
++ if (!(saved_regs & (1 << ASM_REGNUM (LR_REGNUM))))
++ emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
++ else
++ {
++ if (frame_pointer_needed)
++ addr = gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM);
++ else
++ if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks16"))
++ {
++ addr = plus_constant (stack_pointer_rtx, get_frame_size ());
++ }
++ else
++ {
++ emit_insn (gen_movsi (scratch, GEN_INT (get_frame_size ())));
++ addr = scratch;
++ }
++ emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
++ }
++}
+
+
+
+
+int
+avr32_adjust_insn_length (rtx insn ATTRIBUTE_UNUSED,
-+ int length ATTRIBUTE_UNUSED)
-+ {
-+ return length;
-+ }
++ int length ATTRIBUTE_UNUSED)
++{
++ return length;
++}
+
+void
+avr32_output_return_instruction (int single_ret_inst ATTRIBUTE_UNUSED,
-+ int iscond ATTRIBUTE_UNUSED,
-+ rtx cond ATTRIBUTE_UNUSED, rtx r12_imm)
-+ {
-+
-+ unsigned long saved_reg_mask, saved_fp_reg_mask;
-+ int insert_ret = TRUE;
-+ int reglist8 = 0;
-+ int stack_adjustment = get_frame_size ();
-+ unsigned int func_type = avr32_current_func_type ();
-+ FILE *f = asm_out_file;
++ int iscond ATTRIBUTE_UNUSED,
++ rtx cond ATTRIBUTE_UNUSED, rtx r12_imm)
++{
+
-+ /* Naked functions does not have an epilogue */
-+ if (IS_NAKED (func_type))
-+ return;
++ unsigned long saved_reg_mask, saved_fp_reg_mask;
++ int insert_ret = TRUE;
++ int reglist8 = 0;
++ int stack_adjustment = get_frame_size ();
++ unsigned int func_type = avr32_current_func_type ();
++ FILE *f = asm_out_file;
+
-+ saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
++ /* Naked functions do not have an epilogue */
++ if (IS_NAKED (func_type))
++ return;
+
-+ saved_reg_mask = avr32_compute_save_reg_mask (FALSE);
++ saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
+
-+ /* Reset frame pointer */
-+ if (stack_adjustment > 0)
-+ {
-+ if (avr32_const_ok_for_constraint_p (stack_adjustment, 'I', "Is21"))
-+ {
-+ fprintf (f, "\tsub\tsp, %i # Reset Frame Pointer\n",
-+ -stack_adjustment);
-+ }
-+ else
-+ {
-+ /* TODO! Is it safe to use r8 as scratch?? */
-+ fprintf (f, "\tmov\tr8, lo(%i) # Reset Frame Pointer\n",
-+ -stack_adjustment);
-+ fprintf (f, "\torh\tr8, hi(%i) # Reset Frame Pointer\n",
-+ -stack_adjustment);
-+ fprintf (f, "\tadd\tsp, r8 # Reset Frame Pointer\n");
-+ }
-+ }
++ saved_reg_mask = avr32_compute_save_reg_mask (FALSE);
+
-+ if (saved_fp_reg_mask)
-+ {
-+ char reglist[64]; /* 64 bytes should be enough... */
-+ avr32_make_fp_reglist_w (saved_fp_reg_mask, (char *) reglist);
-+ fprintf (f, "\tldcm.w\tcp0, sp++, %s\n", reglist);
-+ if (saved_fp_reg_mask & ~0xff)
-+ {
-+ saved_fp_reg_mask &= ~0xff;
-+ avr32_make_fp_reglist_d (saved_fp_reg_mask, (char *) reglist);
-+ fprintf (f, "\tldcm.d\tcp0, sp++, %s\n", reglist);
-+ }
-+ }
++ /* Reset frame pointer */
++ if (stack_adjustment > 0)
++ {
++ if (avr32_const_ok_for_constraint_p (stack_adjustment, 'I', "Is21"))
++ {
++ fprintf (f, "\tsub\tsp, %i # Reset Frame Pointer\n",
++ -stack_adjustment);
++ }
++ else
++ {
++ /* TODO! Is it safe to use r8 as scratch?? */
++ fprintf (f, "\tmov\tr8, lo(%i) # Reset Frame Pointer\n",
++ -stack_adjustment);
++ fprintf (f, "\torh\tr8, hi(%i) # Reset Frame Pointer\n",
++ -stack_adjustment);
++ fprintf (f, "\tadd\tsp, r8 # Reset Frame Pointer\n");
++ }
++ }
+
-+ if (saved_reg_mask)
-+ {
-+ /* Must pop used registers */
++ if (saved_fp_reg_mask)
++ {
++ char reglist[64]; /* 64 bytes should be enough... */
++ avr32_make_fp_reglist_w (saved_fp_reg_mask, (char *) reglist);
++ fprintf (f, "\tldcm.w\tcp0, sp++, %s\n", reglist);
++ if (saved_fp_reg_mask & ~0xff)
++ {
++ saved_fp_reg_mask &= ~0xff;
++ avr32_make_fp_reglist_d (saved_fp_reg_mask, (char *) reglist);
++ fprintf (f, "\tldcm.d\tcp0, sp++, %s\n", reglist);
++ }
++ }
+
-+ /* Should we use POPM or LDM? */
-+ int usePOPM = TRUE;
-+ if (((saved_reg_mask & (1 << 0)) ||
-+ (saved_reg_mask & (1 << 1)) ||
-+ (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
-+ {
-+ /* One of R0-R3 should at least be popped */
-+ if (((saved_reg_mask & (1 << 0)) &&
-+ (saved_reg_mask & (1 << 1)) &&
-+ (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
-+ {
-+ /* All should be popped */
-+ reglist8 |= 0x01;
-+ }
-+ else
-+ {
-+ usePOPM = FALSE;
-+ }
-+ }
++ if (saved_reg_mask)
++ {
++ /* Must pop used registers */
++
++ /* Should we use POPM or LDM? */
++ int usePOPM = TRUE;
++ if (((saved_reg_mask & (1 << 0)) ||
++ (saved_reg_mask & (1 << 1)) ||
++ (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
++ {
++ /* One of R0-R3 should at least be popped */
++ if (((saved_reg_mask & (1 << 0)) &&
++ (saved_reg_mask & (1 << 1)) &&
++ (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
++ {
++ /* All should be popped */
++ reglist8 |= 0x01;
++ }
++ else
++ {
++ usePOPM = FALSE;
++ }
++ }
+
-+ if (((saved_reg_mask & (1 << 4)) ||
-+ (saved_reg_mask & (1 << 5)) ||
-+ (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
-+ {
-+ /* One of R0-R3 should at least be popped */
-+ if (((saved_reg_mask & (1 << 4)) &&
-+ (saved_reg_mask & (1 << 5)) &&
-+ (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
-+ {
-+ if (usePOPM)
-+ /* All should be popped */
-+ reglist8 |= 0x02;
-+ }
-+ else
-+ {
-+ usePOPM = FALSE;
-+ }
-+ }
++ if (((saved_reg_mask & (1 << 4)) ||
++ (saved_reg_mask & (1 << 5)) ||
++ (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
++ {
++ /* One of R4-R7 should at least be popped */
++ if (((saved_reg_mask & (1 << 4)) &&
++ (saved_reg_mask & (1 << 5)) &&
++ (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
++ {
++ if (usePOPM)
++ /* All should be popped */
++ reglist8 |= 0x02;
++ }
++ else
++ {
++ usePOPM = FALSE;
++ }
++ }
+
-+ if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
-+ {
-+ /* One of R8-R9 should at least be pushed */
-+ if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
-+ {
-+ if (usePOPM)
-+ /* All should be pushed */
-+ reglist8 |= 0x04;
-+ }
-+ else
-+ {
-+ usePOPM = FALSE;
-+ }
-+ }
++ if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
++ {
++ /* One of R8-R9 should at least be popped */
++ if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
++ {
++ if (usePOPM)
++ /* All should be popped */
++ reglist8 |= 0x04;
++ }
++ else
++ {
++ usePOPM = FALSE;
++ }
++ }
+
-+ if (saved_reg_mask & (1 << 10))
-+ reglist8 |= 0x08;
++ if (saved_reg_mask & (1 << 10))
++ reglist8 |= 0x08;
+
-+ if (saved_reg_mask & (1 << 11))
-+ reglist8 |= 0x10;
++ if (saved_reg_mask & (1 << 11))
++ reglist8 |= 0x10;
+
-+ if (saved_reg_mask & (1 << 12))
-+ reglist8 |= 0x20;
++ if (saved_reg_mask & (1 << 12))
++ reglist8 |= 0x20;
+
-+ if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
-+ /* Pop LR */
-+ reglist8 |= 0x40;
++ if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
++ /* Pop LR */
++ reglist8 |= 0x40;
+
-+ if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
-+ /* Pop LR into PC. */
-+ reglist8 |= 0x80;
++ if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
++ /* Pop LR into PC. */
++ reglist8 |= 0x80;
+
-+ if (usePOPM)
-+ {
-+ char reglist[64]; /* 64 bytes should be enough... */
-+ avr32_make_reglist8 (reglist8, (char *) reglist);
++ if (usePOPM)
++ {
++ char reglist[64]; /* 64 bytes should be enough... */
++ avr32_make_reglist8 (reglist8, (char *) reglist);
+
-+ if (reglist8 & 0x80)
-+ /* This instruction is also a return */
-+ insert_ret = FALSE;
++ if (reglist8 & 0x80)
++ /* This instruction is also a return */
++ insert_ret = FALSE;
+
-+ if (r12_imm && !insert_ret)
-+ fprintf (f, "\tpopm\t%s, r12=%li\n", reglist, INTVAL (r12_imm));
-+ else
-+ fprintf (f, "\tpopm\t%s\n", reglist);
++ if (r12_imm && !insert_ret)
++ fprintf (f, "\tpopm\t%s, r12=%li\n", reglist, INTVAL (r12_imm));
++ else
++ fprintf (f, "\tpopm\t%s\n", reglist);
+
-+ }
-+ else
-+ {
-+ char reglist[64]; /* 64 bytes should be enough... */
-+ avr32_make_reglist16 (saved_reg_mask, (char *) reglist);
-+ if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
-+ /* This instruction is also a return */
-+ insert_ret = FALSE;
-+
-+ if (r12_imm && !insert_ret)
-+ fprintf (f, "\tldm\tsp++, %s, r12=%li\n", reglist,
-+ INTVAL (r12_imm));
-+ else
-+ fprintf (f, "\tldm\tsp++, %s\n", reglist);
++ }
++ else
++ {
++ char reglist[64]; /* 64 bytes should be enough... */
++ avr32_make_reglist16 (saved_reg_mask, (char *) reglist);
++ if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
++ /* This instruction is also a return */
++ insert_ret = FALSE;
++
++ if (r12_imm && !insert_ret)
++ fprintf (f, "\tldm\tsp++, %s, r12=%li\n", reglist,
++ INTVAL (r12_imm));
++ else
++ fprintf (f, "\tldm\tsp++, %s\n", reglist);
+
-+ }
++ }
+
-+ }
++ }
+
-+ /* Stack adjustment for exception handler. */
-+ if (current_function_calls_eh_return)
-+ fprintf (f, "\tadd\tsp, r%d\n", ASM_REGNUM (EH_RETURN_STACKADJ_REGNO));
++ /* Stack adjustment for exception handler. */
++ if (current_function_calls_eh_return)
++ fprintf (f, "\tadd\tsp, r%d\n", ASM_REGNUM (EH_RETURN_STACKADJ_REGNO));
+
+
-+ if (IS_INTERRUPT (func_type))
-+ {
-+ fprintf (f, "\trete\n");
-+ }
-+ else if (insert_ret)
-+ {
-+ if (r12_imm)
-+ fprintf (f, "\tretal\t%li\n", INTVAL (r12_imm));
-+ else
-+ fprintf (f, "\tretal\tr12\n");
-+ }
-+ }
++ if (IS_INTERRUPT (func_type))
++ {
++ fprintf (f, "\trete\n");
++ }
++ else if (insert_ret)
++ {
++ if (r12_imm)
++ fprintf (f, "\tretal\t%li\n", INTVAL (r12_imm));
++ else
++ fprintf (f, "\tretal\tr12\n");
++ }
++}
+
+/* Function for converting a fp-register mask to a
+ reglistCPD8 register list string. */
+void
+avr32_make_fp_reglist_d (int reglist_mask, char *reglist_string)
-+ {
-+ int i;
++{
++ int i;
+
-+ /* Make sure reglist_string is empty */
-+ reglist_string[0] = '\0';
++ /* Make sure reglist_string is empty */
++ reglist_string[0] = '\0';
+
-+ for (i = 0; i < NUM_FP_REGS; i += 2)
-+ {
-+ if (reglist_mask & (1 << i))
-+ {
-+ strlen (reglist_string) ?
-+ sprintf (reglist_string, "%s, %s-%s", reglist_string,
-+ reg_names[INTERNAL_FP_REGNUM (i)],
-+ reg_names[INTERNAL_FP_REGNUM (i + 1)]) :
-+ sprintf (reglist_string, "%s-%s",
-+ reg_names[INTERNAL_FP_REGNUM (i)],
-+ reg_names[INTERNAL_FP_REGNUM (i + 1)]);
-+ }
-+ }
-+ }
++ for (i = 0; i < NUM_FP_REGS; i += 2)
++ {
++ if (reglist_mask & (1 << i))
++ {
++ strlen (reglist_string) ?
++ sprintf (reglist_string, "%s, %s-%s", reglist_string,
++ reg_names[INTERNAL_FP_REGNUM (i)],
++ reg_names[INTERNAL_FP_REGNUM (i + 1)]) :
++ sprintf (reglist_string, "%s-%s",
++ reg_names[INTERNAL_FP_REGNUM (i)],
++ reg_names[INTERNAL_FP_REGNUM (i + 1)]);
++ }
++ }
++}
+
+/* Function for converting a fp-register mask to a
+ reglistCP8 register list string. */
+void
+avr32_make_fp_reglist_w (int reglist_mask, char *reglist_string)
-+ {
-+ int i;
++{
++ int i;
+
-+ /* Make sure reglist_string is empty */
-+ reglist_string[0] = '\0';
++ /* Make sure reglist_string is empty */
++ reglist_string[0] = '\0';
+
-+ for (i = 0; i < NUM_FP_REGS; ++i)
-+ {
-+ if (reglist_mask & (1 << i))
-+ {
-+ strlen (reglist_string) ?
-+ sprintf (reglist_string, "%s, %s", reglist_string,
-+ reg_names[INTERNAL_FP_REGNUM (i)]) :
-+ sprintf (reglist_string, "%s", reg_names[INTERNAL_FP_REGNUM (i)]);
-+ }
-+ }
-+ }
++ for (i = 0; i < NUM_FP_REGS; ++i)
++ {
++ if (reglist_mask & (1 << i))
++ {
++ strlen (reglist_string) ?
++ sprintf (reglist_string, "%s, %s", reglist_string,
++ reg_names[INTERNAL_FP_REGNUM (i)]) :
++ sprintf (reglist_string, "%s", reg_names[INTERNAL_FP_REGNUM (i)]);
++ }
++ }
++}
+
+void
+avr32_make_reglist16 (int reglist16_vect, char *reglist16_string)
-+ {
-+ int i;
++{
++ int i;
+
-+ /* Make sure reglist16_string is empty */
-+ reglist16_string[0] = '\0';
++ /* Make sure reglist16_string is empty */
++ reglist16_string[0] = '\0';
+
-+ for (i = 0; i < 16; ++i)
-+ {
-+ if (reglist16_vect & (1 << i))
-+ {
-+ strlen (reglist16_string) ?
-+ sprintf (reglist16_string, "%s, %s", reglist16_string,
-+ reg_names[INTERNAL_REGNUM (i)]) :
-+ sprintf (reglist16_string, "%s", reg_names[INTERNAL_REGNUM (i)]);
-+ }
-+ }
-+ }
++ for (i = 0; i < 16; ++i)
++ {
++ if (reglist16_vect & (1 << i))
++ {
++ strlen (reglist16_string) ?
++ sprintf (reglist16_string, "%s, %s", reglist16_string,
++ reg_names[INTERNAL_REGNUM (i)]) :
++ sprintf (reglist16_string, "%s", reg_names[INTERNAL_REGNUM (i)]);
++ }
++ }
++}
+
+int
+avr32_convert_to_reglist16 (int reglist8_vect)
-+ {
-+ int reglist16_vect = 0;
-+ if (reglist8_vect & 0x1)
-+ reglist16_vect |= 0xF;
-+ if (reglist8_vect & 0x2)
-+ reglist16_vect |= 0xF0;
-+ if (reglist8_vect & 0x4)
-+ reglist16_vect |= 0x300;
-+ if (reglist8_vect & 0x8)
-+ reglist16_vect |= 0x400;
-+ if (reglist8_vect & 0x10)
-+ reglist16_vect |= 0x800;
-+ if (reglist8_vect & 0x20)
-+ reglist16_vect |= 0x1000;
-+ if (reglist8_vect & 0x40)
-+ reglist16_vect |= 0x4000;
-+ if (reglist8_vect & 0x80)
-+ reglist16_vect |= 0x8000;
-+
-+ return reglist16_vect;
-+ }
++{
++ int reglist16_vect = 0;
++ if (reglist8_vect & 0x1)
++ reglist16_vect |= 0xF;
++ if (reglist8_vect & 0x2)
++ reglist16_vect |= 0xF0;
++ if (reglist8_vect & 0x4)
++ reglist16_vect |= 0x300;
++ if (reglist8_vect & 0x8)
++ reglist16_vect |= 0x400;
++ if (reglist8_vect & 0x10)
++ reglist16_vect |= 0x800;
++ if (reglist8_vect & 0x20)
++ reglist16_vect |= 0x1000;
++ if (reglist8_vect & 0x40)
++ reglist16_vect |= 0x4000;
++ if (reglist8_vect & 0x80)
++ reglist16_vect |= 0x8000;
++
++ return reglist16_vect;
++}
+
+void
+avr32_make_reglist8 (int reglist8_vect, char *reglist8_string)
-+ {
-+ /* Make sure reglist8_string is empty */
-+ reglist8_string[0] = '\0';
-+
-+ if (reglist8_vect & 0x1)
-+ sprintf (reglist8_string, "r0-r3");
-+ if (reglist8_vect & 0x2)
-+ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r4-r7",
-+ reglist8_string) :
-+ sprintf (reglist8_string, "r4-r7");
-+ if (reglist8_vect & 0x4)
-+ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r8-r9",
-+ reglist8_string) :
-+ sprintf (reglist8_string, "r8-r9");
-+ if (reglist8_vect & 0x8)
-+ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r10",
-+ reglist8_string) :
-+ sprintf (reglist8_string, "r10");
-+ if (reglist8_vect & 0x10)
-+ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r11",
-+ reglist8_string) :
-+ sprintf (reglist8_string, "r11");
-+ if (reglist8_vect & 0x20)
-+ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r12",
-+ reglist8_string) :
-+ sprintf (reglist8_string, "r12");
-+ if (reglist8_vect & 0x40)
-+ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, lr",
-+ reglist8_string) :
-+ sprintf (reglist8_string, "lr");
-+ if (reglist8_vect & 0x80)
-+ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, pc",
-+ reglist8_string) :
-+ sprintf (reglist8_string, "pc");
-+ }
++{
++ /* Make sure reglist8_string is empty */
++ reglist8_string[0] = '\0';
++
++ if (reglist8_vect & 0x1)
++ sprintf (reglist8_string, "r0-r3");
++ if (reglist8_vect & 0x2)
++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r4-r7",
++ reglist8_string) :
++ sprintf (reglist8_string, "r4-r7");
++ if (reglist8_vect & 0x4)
++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r8-r9",
++ reglist8_string) :
++ sprintf (reglist8_string, "r8-r9");
++ if (reglist8_vect & 0x8)
++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r10",
++ reglist8_string) :
++ sprintf (reglist8_string, "r10");
++ if (reglist8_vect & 0x10)
++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r11",
++ reglist8_string) :
++ sprintf (reglist8_string, "r11");
++ if (reglist8_vect & 0x20)
++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r12",
++ reglist8_string) :
++ sprintf (reglist8_string, "r12");
++ if (reglist8_vect & 0x40)
++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, lr",
++ reglist8_string) :
++ sprintf (reglist8_string, "lr");
++ if (reglist8_vect & 0x80)
++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, pc",
++ reglist8_string) :
++ sprintf (reglist8_string, "pc");
++}
+
+int
+avr32_eh_return_data_regno (int n)
-+ {
-+ if (n >= 0 && n <= 3)
-+ return 8 + n;
-+ else
-+ return INVALID_REGNUM;
-+ }
++{
++ if (n >= 0 && n <= 3)
++ return 8 + n;
++ else
++ return INVALID_REGNUM;
++}
+
+/* Compute the distance from register FROM to register TO.
+ These can be the arg pointer, the frame pointer or
+
+int
+avr32_initial_elimination_offset (int from, int to)
-+ {
-+ int i;
-+ int call_saved_regs = 0;
-+ unsigned long saved_reg_mask, saved_fp_reg_mask;
-+ unsigned int local_vars = get_frame_size ();
++{
++ int i;
++ int call_saved_regs = 0;
++ unsigned long saved_reg_mask, saved_fp_reg_mask;
++ unsigned int local_vars = get_frame_size ();
+
-+ saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
-+ saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
++ saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
++ saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
+
-+ for (i = 0; i < 16; ++i)
-+ {
-+ if (saved_reg_mask & (1 << i))
-+ call_saved_regs += 4;
-+ }
++ for (i = 0; i < 16; ++i)
++ {
++ if (saved_reg_mask & (1 << i))
++ call_saved_regs += 4;
++ }
+
-+ for (i = 0; i < NUM_FP_REGS; ++i)
-+ {
-+ if (saved_fp_reg_mask & (1 << i))
-+ call_saved_regs += 4;
-+ }
++ for (i = 0; i < NUM_FP_REGS; ++i)
++ {
++ if (saved_fp_reg_mask & (1 << i))
++ call_saved_regs += 4;
++ }
+
-+ switch (from)
++ switch (from)
+ {
+ case ARG_POINTER_REGNUM:
+ switch (to)
-+ {
-+ case STACK_POINTER_REGNUM:
-+ return call_saved_regs + local_vars;
-+ case FRAME_POINTER_REGNUM:
-+ return call_saved_regs;
-+ default:
-+ abort ();
-+ }
++ {
++ case STACK_POINTER_REGNUM:
++ return call_saved_regs + local_vars;
++ case FRAME_POINTER_REGNUM:
++ return call_saved_regs;
++ default:
++ abort ();
++ }
+ case FRAME_POINTER_REGNUM:
+ switch (to)
-+ {
-+ case STACK_POINTER_REGNUM:
-+ return local_vars;
-+ default:
-+ abort ();
-+ }
++ {
++ case STACK_POINTER_REGNUM:
++ return local_vars;
++ default:
++ abort ();
++ }
+ default:
+ abort ();
+ }
-+ }
++}
+
+
+/*
+ Returns a rtx used when passing the next argument to a function.
+ avr32_init_cumulative_args() and avr32_function_arg_advance() sets witch
+ register to use.
-+ */
++*/
+rtx
+avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode,
-+ tree type, int named)
-+ {
-+ int index = -1;
++ tree type, int named)
++{
++ int index = -1;
+
-+ HOST_WIDE_INT arg_size, arg_rsize;
-+ if (type)
-+ {
-+ arg_size = int_size_in_bytes (type);
-+ }
-+ else
-+ {
-+ arg_size = GET_MODE_SIZE (mode);
-+ }
-+ arg_rsize = PUSH_ROUNDING (arg_size);
++ HOST_WIDE_INT arg_size, arg_rsize;
++ if (type)
++ {
++ arg_size = int_size_in_bytes (type);
++ }
++ else
++ {
++ arg_size = GET_MODE_SIZE (mode);
++ }
++ arg_rsize = PUSH_ROUNDING (arg_size);
+
-+ /*
++ /*
+ The last time this macro is called, it is called with mode == VOIDmode,
+ and its result is passed to the call or call_value pattern as operands 2
+ and 3 respectively. */
-+ if (mode == VOIDmode)
-+ {
-+ return gen_rtx_CONST_INT (SImode, 22); /* ToDo: fixme. */
-+ }
++ if (mode == VOIDmode)
++ {
++ return gen_rtx_CONST_INT (SImode, 22); /* ToDo: fixme. */
++ }
+
-+ if ((*targetm.calls.must_pass_in_stack) (mode, type) || !named)
-+ {
-+ return NULL_RTX;
-+ }
++ if ((*targetm.calls.must_pass_in_stack) (mode, type) || !named)
++ {
++ return NULL_RTX;
++ }
+
-+ if (arg_rsize == 8)
-+ {
-+ /* use r11:r10 or r9:r8. */
-+ if (!(GET_USED_INDEX (cum, 1) || GET_USED_INDEX (cum, 2)))
-+ index = 1;
-+ else if (!(GET_USED_INDEX (cum, 3) || GET_USED_INDEX (cum, 4)))
-+ index = 3;
-+ else
-+ index = -1;
-+ }
-+ else if (arg_rsize == 4)
-+ { /* Use first available register */
-+ index = 0;
-+ while (index <= LAST_CUM_REG_INDEX && GET_USED_INDEX (cum, index))
-+ index++;
-+ if (index > LAST_CUM_REG_INDEX)
-+ index = -1;
-+ }
++ if (arg_rsize == 8)
++ {
++ /* use r11:r10 or r9:r8. */
++ if (!(GET_USED_INDEX (cum, 1) || GET_USED_INDEX (cum, 2)))
++ index = 1;
++ else if (!(GET_USED_INDEX (cum, 3) || GET_USED_INDEX (cum, 4)))
++ index = 3;
++ else
++ index = -1;
++ }
++ else if (arg_rsize == 4)
++ { /* Use first available register */
++ index = 0;
++ while (index <= LAST_CUM_REG_INDEX && GET_USED_INDEX (cum, index))
++ index++;
++ if (index > LAST_CUM_REG_INDEX)
++ index = -1;
++ }
+
-+ SET_REG_INDEX (cum, index);
++ SET_REG_INDEX (cum, index);
+
-+ if (GET_REG_INDEX (cum) >= 0)
-+ return gen_rtx_REG (mode,
-+ avr32_function_arg_reglist[GET_REG_INDEX (cum)]);
++ if (GET_REG_INDEX (cum) >= 0)
++ return gen_rtx_REG (mode,
++ avr32_function_arg_reglist[GET_REG_INDEX (cum)]);
+
-+ return NULL_RTX;
-+ }
++ return NULL_RTX;
++}
+
+/*
+ Set the register used for passing the first argument to a function.
-+ */
++*/
+void
-+avr32_init_cumulative_args (CUMULATIVE_ARGS * cum, tree fntype,
++avr32_init_cumulative_args (CUMULATIVE_ARGS * cum,
++ tree fntype ATTRIBUTE_UNUSED,
+ rtx libname ATTRIBUTE_UNUSED,
+ tree fndecl ATTRIBUTE_UNUSED)
+ {
+ 0 r1 ||
+ 1 r0 _||_________
+
-+ */
++*/
+void
+avr32_function_arg_advance (CUMULATIVE_ARGS * cum, enum machine_mode mode,
-+ tree type, int named ATTRIBUTE_UNUSED)
-+ {
-+ HOST_WIDE_INT arg_size, arg_rsize;
++ tree type, int named ATTRIBUTE_UNUSED)
++{
++ HOST_WIDE_INT arg_size, arg_rsize;
+
-+ if (type)
-+ {
-+ arg_size = int_size_in_bytes (type);
-+ }
-+ else
-+ {
-+ arg_size = GET_MODE_SIZE (mode);
-+ }
-+ arg_rsize = PUSH_ROUNDING (arg_size);
++ if (type)
++ {
++ arg_size = int_size_in_bytes (type);
++ }
++ else
++ {
++ arg_size = GET_MODE_SIZE (mode);
++ }
++ arg_rsize = PUSH_ROUNDING (arg_size);
+
-+ /* It the argument had to be passed in stack, no register is used. */
-+ if ((*targetm.calls.must_pass_in_stack) (mode, type))
-+ {
-+ cum->stack_pushed_args_size += PUSH_ROUNDING (int_size_in_bytes (type));
-+ return;
-+ }
++ /* If the argument had to be passed on the stack, no register is used. */
++ if ((*targetm.calls.must_pass_in_stack) (mode, type))
++ {
++ cum->stack_pushed_args_size += PUSH_ROUNDING (int_size_in_bytes (type));
++ return;
++ }
+
-+ /* Mark the used registers as "used". */
-+ if (GET_REG_INDEX (cum) >= 0)
-+ {
-+ SET_USED_INDEX (cum, GET_REG_INDEX (cum));
-+ if (arg_rsize == 8)
-+ {
-+ SET_USED_INDEX (cum, (GET_REG_INDEX (cum) + 1));
-+ }
-+ }
-+ else
-+ {
-+ /* Had to use stack */
-+ cum->stack_pushed_args_size += arg_rsize;
-+ }
-+ }
++ /* Mark the used registers as "used". */
++ if (GET_REG_INDEX (cum) >= 0)
++ {
++ SET_USED_INDEX (cum, GET_REG_INDEX (cum));
++ if (arg_rsize == 8)
++ {
++ SET_USED_INDEX (cum, (GET_REG_INDEX (cum) + 1));
++ }
++ }
++ else
++ {
++ /* Had to use stack */
++ cum->stack_pushed_args_size += arg_rsize;
++ }
++}
+
+/*
+ Defines witch direction to go to find the next register to use if the
+ size not a multiple of 4. */
+enum direction
+avr32_function_arg_padding (enum machine_mode mode ATTRIBUTE_UNUSED,
-+ tree type)
-+ {
-+ /* Pad upward for all aggregates except byte and halfword sized aggregates
++ tree type)
++{
++ /* Pad upward for all aggregates except byte and halfword sized aggregates
+ which can be passed in registers. */
-+ if (type
-+ && AGGREGATE_TYPE_P (type)
-+ && (int_size_in_bytes (type) != 1)
-+ && !((int_size_in_bytes (type) == 2)
-+ && TYPE_ALIGN_UNIT (type) >= 2)
-+ && (int_size_in_bytes (type) & 0x3))
-+ {
-+ return upward;
-+ }
++ if (type
++ && AGGREGATE_TYPE_P (type)
++ && (int_size_in_bytes (type) != 1)
++ && !((int_size_in_bytes (type) == 2)
++ && TYPE_ALIGN_UNIT (type) >= 2)
++ && (int_size_in_bytes (type) & 0x3))
++ {
++ return upward;
++ }
+
-+ return downward;
-+ }
++ return downward;
++}
+
+/*
+ Return a rtx used for the return value from a function call.
-+ */
++*/
+rtx
+avr32_function_value (tree type, tree func, bool outgoing ATTRIBUTE_UNUSED)
-+ {
-+ if (avr32_return_in_memory (type, func))
-+ return NULL_RTX;
++{
++ if (avr32_return_in_memory (type, func))
++ return NULL_RTX;
+
-+ if (int_size_in_bytes (type) <= 4)
-+ if (avr32_return_in_msb (type))
-+ /* Aggregates of size less than a word which does align the data in the
++ if (int_size_in_bytes (type) <= 4)
++ if (avr32_return_in_msb (type))
++ /* Aggregates of size less than a word which does align the data in the
+ MSB must use SImode for r12. */
-+ return gen_rtx_REG (SImode, RET_REGISTER);
-+ else
-+ return gen_rtx_REG (TYPE_MODE (type), RET_REGISTER);
-+ else if (int_size_in_bytes (type) <= 8)
-+ return gen_rtx_REG (TYPE_MODE (type), INTERNAL_REGNUM (11));
++ return gen_rtx_REG (SImode, RET_REGISTER);
++ else
++ return gen_rtx_REG (TYPE_MODE (type), RET_REGISTER);
++ else if (int_size_in_bytes (type) <= 8)
++ return gen_rtx_REG (TYPE_MODE (type), INTERNAL_REGNUM (11));
+
-+ return NULL_RTX;
-+ }
++ return NULL_RTX;
++}
+
+/*
+ Return a rtx used for the return value from a library function call.
-+ */
++*/
+rtx
+avr32_libcall_value (enum machine_mode mode)
-+ {
++{
+
-+ if (GET_MODE_SIZE (mode) <= 4)
-+ return gen_rtx_REG (mode, RET_REGISTER);
-+ else if (GET_MODE_SIZE (mode) <= 8)
-+ return gen_rtx_REG (mode, INTERNAL_REGNUM (11));
-+ else
-+ return NULL_RTX;
-+ }
++ if (GET_MODE_SIZE (mode) <= 4)
++ return gen_rtx_REG (mode, RET_REGISTER);
++ else if (GET_MODE_SIZE (mode) <= 8)
++ return gen_rtx_REG (mode, INTERNAL_REGNUM (11));
++ else
++ return NULL_RTX;
++}
+
+/* Return TRUE if X references a SYMBOL_REF. */
+int
+symbol_mentioned_p (rtx x)
-+ {
-+ const char *fmt;
-+ int i;
++{
++ const char *fmt;
++ int i;
+
-+ if (GET_CODE (x) == SYMBOL_REF)
-+ return 1;
++ if (GET_CODE (x) == SYMBOL_REF)
++ return 1;
+
-+ fmt = GET_RTX_FORMAT (GET_CODE (x));
++ fmt = GET_RTX_FORMAT (GET_CODE (x));
+
-+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
-+ {
-+ if (fmt[i] == 'E')
-+ {
-+ int j;
++ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
++ {
++ if (fmt[i] == 'E')
++ {
++ int j;
+
-+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
-+ if (symbol_mentioned_p (XVECEXP (x, i, j)))
-+ return 1;
-+ }
-+ else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
-+ return 1;
-+ }
++ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
++ if (symbol_mentioned_p (XVECEXP (x, i, j)))
++ return 1;
++ }
++ else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
++ return 1;
++ }
+
-+ return 0;
-+ }
++ return 0;
++}
+
+/* Return TRUE if X references a LABEL_REF. */
+int
+label_mentioned_p (rtx x)
-+ {
-+ const char *fmt;
-+ int i;
++{
++ const char *fmt;
++ int i;
+
-+ if (GET_CODE (x) == LABEL_REF)
-+ return 1;
++ if (GET_CODE (x) == LABEL_REF)
++ return 1;
+
-+ fmt = GET_RTX_FORMAT (GET_CODE (x));
-+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
-+ {
-+ if (fmt[i] == 'E')
-+ {
-+ int j;
++ fmt = GET_RTX_FORMAT (GET_CODE (x));
++ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
++ {
++ if (fmt[i] == 'E')
++ {
++ int j;
+
-+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
-+ if (label_mentioned_p (XVECEXP (x, i, j)))
-+ return 1;
-+ }
-+ else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
-+ return 1;
-+ }
++ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
++ if (label_mentioned_p (XVECEXP (x, i, j)))
++ return 1;
++ }
++ else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
++ return 1;
++ }
+
-+ return 0;
-+ }
++ return 0;
++}
++
++/* Return TRUE if X contains a MEM expression. */
++int
++mem_mentioned_p (rtx x)
++{
++ const char *fmt;
++ int i;
++
++ if (MEM_P (x))
++ return 1;
++
++ fmt = GET_RTX_FORMAT (GET_CODE (x));
++ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
++ {
++ if (fmt[i] == 'E')
++ {
++ int j;
++
++ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
++ if (mem_mentioned_p (XVECEXP (x, i, j)))
++ return 1;
++ }
++ else if (fmt[i] == 'e' && mem_mentioned_p (XEXP (x, i)))
++ return 1;
++ }
+
++ return 0;
++}
+
+int
+avr32_legitimate_pic_operand_p (rtx x)
-+ {
++{
+
-+ /* We can't have const, this must be broken down to a symbol. */
-+ if (GET_CODE (x) == CONST)
-+ return FALSE;
++ /* We can't have const, this must be broken down to a symbol. */
++ if (GET_CODE (x) == CONST)
++ return FALSE;
+
-+ /* Can't access symbols or labels via the constant pool either */
-+ if ((GET_CODE (x) == SYMBOL_REF
-+ && CONSTANT_POOL_ADDRESS_P (x)
-+ && (symbol_mentioned_p (get_pool_constant (x))
-+ || label_mentioned_p (get_pool_constant (x)))))
-+ return FALSE;
++ /* Can't access symbols or labels via the constant pool either */
++ if ((GET_CODE (x) == SYMBOL_REF
++ && CONSTANT_POOL_ADDRESS_P (x)
++ && (symbol_mentioned_p (get_pool_constant (x))
++ || label_mentioned_p (get_pool_constant (x)))))
++ return FALSE;
+
-+ return TRUE;
-+ }
++ return TRUE;
++}
+
+
+rtx
+legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
-+ rtx reg)
-+ {
++ rtx reg)
++{
+
-+ if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
-+ {
-+ int subregs = 0;
++ if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
++ {
++ int subregs = 0;
+
-+ if (reg == 0)
-+ {
-+ if (no_new_pseudos)
-+ abort ();
-+ else
-+ reg = gen_reg_rtx (Pmode);
++ if (reg == 0)
++ {
++ if (no_new_pseudos)
++ abort ();
++ else
++ reg = gen_reg_rtx (Pmode);
+
-+ subregs = 1;
-+ }
++ subregs = 1;
++ }
+
-+ emit_move_insn (reg, orig);
++ emit_move_insn (reg, orig);
+
-+ /* Only set current function as using pic offset table if flag_pic is
++ /* Only set current function as using pic offset table if flag_pic is
+ set. This is because this function is also used if
+ TARGET_HAS_ASM_ADDR_PSEUDOS is set. */
-+ if (flag_pic)
-+ current_function_uses_pic_offset_table = 1;
++ if (flag_pic)
++ current_function_uses_pic_offset_table = 1;
+
-+ /* Put a REG_EQUAL note on this insn, so that it can be optimized by
++ /* Put a REG_EQUAL note on this insn, so that it can be optimized by
+ loop. */
-+ return reg;
-+ }
-+ else if (GET_CODE (orig) == CONST)
-+ {
-+ rtx base, offset;
-+
-+ if (flag_pic
-+ && GET_CODE (XEXP (orig, 0)) == PLUS
-+ && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
-+ return orig;
-+
-+ if (reg == 0)
-+ {
-+ if (no_new_pseudos)
-+ abort ();
-+ else
-+ reg = gen_reg_rtx (Pmode);
-+ }
++ return reg;
++ }
++ else if (GET_CODE (orig) == CONST)
++ {
++ rtx base, offset;
++
++ if (flag_pic
++ && GET_CODE (XEXP (orig, 0)) == PLUS
++ && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
++ return orig;
++
++ if (reg == 0)
++ {
++ if (no_new_pseudos)
++ abort ();
++ else
++ reg = gen_reg_rtx (Pmode);
++ }
+
-+ if (GET_CODE (XEXP (orig, 0)) == PLUS)
-+ {
-+ base =
-+ legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
-+ offset =
-+ legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
-+ base == reg ? 0 : reg);
-+ }
-+ else
-+ abort ();
++ if (GET_CODE (XEXP (orig, 0)) == PLUS)
++ {
++ base =
++ legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
++ offset =
++ legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
++ base == reg ? 0 : reg);
++ }
++ else
++ abort ();
+
-+ if (GET_CODE (offset) == CONST_INT)
-+ {
-+ /* The base register doesn't really matter, we only want to test
++ if (GET_CODE (offset) == CONST_INT)
++ {
++ /* The base register doesn't really matter, we only want to test
+ the index for the appropriate mode. */
-+ if (!avr32_const_ok_for_constraint_p (INTVAL (offset), 'I', "Is21"))
-+ {
-+ if (!no_new_pseudos)
-+ offset = force_reg (Pmode, offset);
-+ else
-+ abort ();
-+ }
++ if (!avr32_const_ok_for_constraint_p (INTVAL (offset), 'I', "Is21"))
++ {
++ if (!no_new_pseudos)
++ offset = force_reg (Pmode, offset);
++ else
++ abort ();
++ }
+
-+ if (GET_CODE (offset) == CONST_INT)
-+ return plus_constant (base, INTVAL (offset));
-+ }
++ if (GET_CODE (offset) == CONST_INT)
++ return plus_constant (base, INTVAL (offset));
++ }
+
-+ return gen_rtx_PLUS (Pmode, base, offset);
-+ }
++ return gen_rtx_PLUS (Pmode, base, offset);
++ }
+
-+ return orig;
-+ }
++ return orig;
++}
+
+/* Generate code to load the PIC register. */
+void
+avr32_load_pic_register (void)
-+ {
-+ rtx l1, pic_tmp;
-+ rtx global_offset_table;
++{
++ rtx l1, pic_tmp;
++ rtx global_offset_table;
+
-+ if ((current_function_uses_pic_offset_table == 0) || TARGET_NO_INIT_GOT)
-+ return;
++ if ((current_function_uses_pic_offset_table == 0) || TARGET_NO_INIT_GOT)
++ return;
+
-+ if (!flag_pic)
-+ abort ();
++ if (!flag_pic)
++ abort ();
+
-+ l1 = gen_label_rtx ();
++ l1 = gen_label_rtx ();
+
-+ global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
-+ pic_tmp =
-+ gen_rtx_CONST (Pmode,
-+ gen_rtx_MINUS (SImode, gen_rtx_LABEL_REF (Pmode, l1),
-+ global_offset_table));
-+ emit_insn (gen_pic_load_addr
-+ (pic_offset_table_rtx, force_const_mem (SImode, pic_tmp)));
-+ emit_insn (gen_pic_compute_got_from_pc (pic_offset_table_rtx, l1));
++ global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
++ pic_tmp =
++ gen_rtx_CONST (Pmode,
++ gen_rtx_MINUS (SImode, gen_rtx_LABEL_REF (Pmode, l1),
++ global_offset_table));
++ emit_insn (gen_pic_load_addr
++ (pic_offset_table_rtx, force_const_mem (SImode, pic_tmp)));
++ emit_insn (gen_pic_compute_got_from_pc (pic_offset_table_rtx, l1));
+
-+ /* Need to emit this whether or not we obey regdecls, since setjmp/longjmp
++ /* Need to emit this whether or not we obey regdecls, since setjmp/longjmp
+ can cause life info to screw up. */
-+ emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
-+ }
++ emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
++}
+
+
+
+ 4-byte register, FUNCTION_VALUE should provide an SImode rtx. */
+bool
+avr32_return_in_msb (tree type ATTRIBUTE_UNUSED)
-+ {
-+ /* if ( AGGREGATE_TYPE_P (type) ) if ((int_size_in_bytes(type) == 1) ||
++{
++ /* if ( AGGREGATE_TYPE_P (type) ) if ((int_size_in_bytes(type) == 1) ||
+ ((int_size_in_bytes(type) == 2) && TYPE_ALIGN_UNIT(type) >= 2)) return
+ false; else return true; */
+
-+ return false;
-+ }
++ return false;
++}
+
+
+/*
+
+ BLKmode and all other modes that is larger than 64 bits are returned in
+ memory.
-+ */
++*/
+bool
+avr32_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
-+ {
-+ if (TYPE_MODE (type) == VOIDmode)
-+ return false;
++{
++ if (TYPE_MODE (type) == VOIDmode)
++ return false;
+
-+ if (int_size_in_bytes (type) > (2 * UNITS_PER_WORD)
-+ || int_size_in_bytes (type) == -1)
-+ {
-+ return true;
-+ }
++ if (int_size_in_bytes (type) > (2 * UNITS_PER_WORD)
++ || int_size_in_bytes (type) == -1)
++ {
++ return true;
++ }
+
-+ /* If we have an aggregate then use the same mechanism as when checking if
++ /* If we have an aggregate then use the same mechanism as when checking if
+ it should be passed on the stack. */
-+ if (type
-+ && AGGREGATE_TYPE_P (type)
-+ && (*targetm.calls.must_pass_in_stack) (TYPE_MODE (type), type))
-+ return true;
++ if (type
++ && AGGREGATE_TYPE_P (type)
++ && (*targetm.calls.must_pass_in_stack) (TYPE_MODE (type), type))
++ return true;
+
-+ return false;
-+ }
++ return false;
++}
+
+
+/* Output the constant part of the trampoline.
+ ; filled in by avr32_initialize_trampoline()
+ .long 0 ; Address to subrutine,
+ ; filled in by avr32_initialize_trampoline()
-+ */
++*/
+void
+avr32_trampoline_template (FILE * file)
-+ {
-+ fprintf (file, "\tlddpc r0, pc[8]\n");
-+ fprintf (file, "\tlddpc pc, pc[8]\n");
-+ /* make room for the address of the static chain. */
-+ fprintf (file, "\t.long\t0\n");
-+ /* make room for the address to the subrutine. */
-+ fprintf (file, "\t.long\t0\n");
-+ }
++{
++ fprintf (file, "\tlddpc r0, pc[8]\n");
++ fprintf (file, "\tlddpc pc, pc[8]\n");
++ /* make room for the address of the static chain. */
++ fprintf (file, "\t.long\t0\n");
++ /* make room for the address to the subroutine. */
++ fprintf (file, "\t.long\t0\n");
++}
+
+
+/*
+ Initialize the variable parts of a trampoline.
-+ */
++*/
+void
+avr32_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
-+ {
-+ /* Store the address to the static chain. */
-+ emit_move_insn (gen_rtx_MEM
-+ (SImode, plus_constant (addr, TRAMPOLINE_SIZE - 4)),
-+ static_chain);
-+
-+ /* Store the address to the function. */
-+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (addr, TRAMPOLINE_SIZE)),
-+ fnaddr);
-+
-+ emit_insn (gen_cache (gen_rtx_REG (SImode, 13),
-+ gen_rtx_CONST_INT (SImode,
-+ AVR32_CACHE_INVALIDATE_ICACHE)));
-+ }
++{
++ /* Store the address to the static chain. */
++ emit_move_insn (gen_rtx_MEM
++ (SImode, plus_constant (addr, TRAMPOLINE_SIZE - 4)),
++ static_chain);
++
++ /* Store the address to the function. */
++ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (addr, TRAMPOLINE_SIZE)),
++ fnaddr);
++
++ emit_insn (gen_cache (gen_rtx_REG (SImode, 13),
++ gen_rtx_CONST_INT (SImode,
++ AVR32_CACHE_INVALIDATE_ICACHE)));
++}
+
+/* Return nonzero if X is valid as an addressing register. */
+int
+avr32_address_register_rtx_p (rtx x, int strict_p)
-+ {
-+ int regno;
++{
++ int regno;
+
-+ if (!register_operand(x, GET_MODE(x)))
-+ return 0;
++ if (!register_operand(x, GET_MODE(x)))
++ return 0;
+
-+ /* If strict we require the register to be a hard register. */
-+ if (strict_p
-+ && !REG_P(x))
-+ return 0;
++ /* If strict we require the register to be a hard register. */
++ if (strict_p
++ && !REG_P(x))
++ return 0;
+
-+ regno = REGNO (x);
++ regno = REGNO (x);
+
-+ if (strict_p)
-+ return REGNO_OK_FOR_BASE_P (regno);
++ if (strict_p)
++ return REGNO_OK_FOR_BASE_P (regno);
+
-+ return (regno <= LAST_REGNUM || regno >= FIRST_PSEUDO_REGISTER);
-+ }
++ return (regno <= LAST_REGNUM || regno >= FIRST_PSEUDO_REGISTER);
++}
+
+/* Return nonzero if INDEX is valid for an address index operand. */
+int
+avr32_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
-+ {
-+ enum rtx_code code = GET_CODE (index);
++{
++ enum rtx_code code = GET_CODE (index);
+
-+ if (GET_MODE_SIZE (mode) > 8)
-+ return 0;
++ if (GET_MODE_SIZE (mode) > 8)
++ return 0;
+
-+ /* Standard coprocessor addressing modes. */
-+ if (code == CONST_INT)
-+ {
-+ if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
-+ /* Coprocessor mem insns has a smaller reach than ordinary mem insns */
-+ return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ku14");
-+ else
-+ return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ks16");
-+ }
++ /* Standard coprocessor addressing modes. */
++ if (code == CONST_INT)
++ {
++ if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
++ /* Coprocessor mem insns has a smaller reach than ordinary mem insns */
++ return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ku14");
++ else
++ return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ks16");
++ }
+
-+ if (avr32_address_register_rtx_p (index, strict_p))
-+ return 1;
++ if (avr32_address_register_rtx_p (index, strict_p))
++ return 1;
+
-+ if (code == MULT)
-+ {
-+ rtx xiop0 = XEXP (index, 0);
-+ rtx xiop1 = XEXP (index, 1);
-+ return ((avr32_address_register_rtx_p (xiop0, strict_p)
-+ && power_of_two_operand (xiop1, SImode)
-+ && (INTVAL (xiop1) <= 8))
-+ || (avr32_address_register_rtx_p (xiop1, strict_p)
-+ && power_of_two_operand (xiop0, SImode)
-+ && (INTVAL (xiop0) <= 8)));
-+ }
-+ else if (code == ASHIFT)
-+ {
-+ rtx op = XEXP (index, 1);
++ if (code == MULT)
++ {
++ rtx xiop0 = XEXP (index, 0);
++ rtx xiop1 = XEXP (index, 1);
++ return ((avr32_address_register_rtx_p (xiop0, strict_p)
++ && power_of_two_operand (xiop1, SImode)
++ && (INTVAL (xiop1) <= 8))
++ || (avr32_address_register_rtx_p (xiop1, strict_p)
++ && power_of_two_operand (xiop0, SImode)
++ && (INTVAL (xiop0) <= 8)));
++ }
++ else if (code == ASHIFT)
++ {
++ rtx op = XEXP (index, 1);
+
-+ return (avr32_address_register_rtx_p (XEXP (index, 0), strict_p)
-+ && GET_CODE (op) == CONST_INT
-+ && INTVAL (op) > 0 && INTVAL (op) <= 3);
-+ }
++ return (avr32_address_register_rtx_p (XEXP (index, 0), strict_p)
++ && GET_CODE (op) == CONST_INT
++ && INTVAL (op) > 0 && INTVAL (op) <= 3);
++ }
+
-+ return 0;
-+ }
++ return 0;
++}
+
+/*
+ Used in the GO_IF_LEGITIMATE_ADDRESS macro. Returns a nonzero value if
+
+ Returns NO_REGS if the address is not legatime, GENERAL_REGS or ALL_REGS
+ if it is.
-+ */
++*/
+
+/* Forward declaration*/
+int is_minipool_label (rtx label);
+
+int
+avr32_legitimate_address (enum machine_mode mode, rtx x, int strict)
-+ {
++{
+
-+ switch (GET_CODE (x))
++ switch (GET_CODE (x))
+ {
+ case REG:
+ return avr32_address_register_rtx_p (x, strict);
++ case CONST_INT:
++ return ((mode==SImode)
++ && CONST_OK_FOR_CONSTRAINT_P(INTVAL(x), 'K', "Ks17"));
+ case CONST:
-+ {
-+ rtx label = avr32_find_symbol (x);
-+ if (label
-+ &&
-+ ((CONSTANT_POOL_ADDRESS_P (label)
-+ && !(flag_pic
-+ && (symbol_mentioned_p (get_pool_constant (label))
-+ || label_mentioned_p (get_pool_constant (label)))))
-+ /* TODO! Can this ever happen??? */
-+ || ((GET_CODE (label) == LABEL_REF)
-+ && GET_CODE (XEXP (label, 0)) == CODE_LABEL
-+ && is_minipool_label (XEXP (label, 0)))))
-+ {
-+ return TRUE;
-+ }
-+ }
-+ break;
++ {
++ rtx label = avr32_find_symbol (x);
++ if (label
++ &&
++ ((CONSTANT_POOL_ADDRESS_P (label)
++ && !(flag_pic
++ && (symbol_mentioned_p (get_pool_constant (label))
++ || label_mentioned_p (get_pool_constant (label)))))
++ /* TODO! Can this ever happen??? */
++ || ((GET_CODE (label) == LABEL_REF)
++ && GET_CODE (XEXP (label, 0)) == CODE_LABEL
++ && is_minipool_label (XEXP (label, 0)))
++ /*|| ((GET_CODE (label) == SYMBOL_REF)
++ && mode == SImode
++ && SYMBOL_REF_RMW_ADDR(label))*/))
++ {
++ return TRUE;
++ }
++ }
++ break;
+ case LABEL_REF:
+ if (GET_CODE (XEXP (x, 0)) == CODE_LABEL
-+ && is_minipool_label (XEXP (x, 0)))
-+ {
-+ return TRUE;
-+ }
++ && is_minipool_label (XEXP (x, 0)))
++ {
++ return TRUE;
++ }
+ break;
+ case SYMBOL_REF:
-+ {
-+ if (CONSTANT_POOL_ADDRESS_P (x)
-+ && !(flag_pic
-+ && (symbol_mentioned_p (get_pool_constant (x))
-+ || label_mentioned_p (get_pool_constant (x)))))
-+ return TRUE;
-+ /*
-+ A symbol_ref is only legal if it is a function. If all of them are
-+ legal, a pseudo reg that is a constant will be replaced by a
-+ symbol_ref and make illegale code. SYMBOL_REF_FLAG is set by
-+ ENCODE_SECTION_INFO. */
-+ else if (SYMBOL_REF_RCALL_FUNCTION_P (x))
-+ return TRUE;
-+ break;
-+ }
++ {
++ if (CONSTANT_POOL_ADDRESS_P (x)
++ && !(flag_pic
++ && (symbol_mentioned_p (get_pool_constant (x))
++ || label_mentioned_p (get_pool_constant (x)))))
++ return TRUE;
++ else if (SYMBOL_REF_RCALL_FUNCTION_P (x)
++ || (mode == SImode
++ && SYMBOL_REF_RMW_ADDR (x)))
++ return TRUE;
++ break;
++ }
+ case PRE_DEC: /* (pre_dec (...)) */
+ case POST_INC: /* (post_inc (...)) */
+ return avr32_address_register_rtx_p (XEXP (x, 0), strict);
+ case PLUS: /* (plus (...) (...)) */
-+ {
-+ rtx xop0 = XEXP (x, 0);
-+ rtx xop1 = XEXP (x, 1);
++ {
++ rtx xop0 = XEXP (x, 0);
++ rtx xop1 = XEXP (x, 1);
+
-+ return ((avr32_address_register_rtx_p (xop0, strict)
-+ && avr32_legitimate_index_p (mode, xop1, strict))
-+ || (avr32_address_register_rtx_p (xop1, strict)
-+ && avr32_legitimate_index_p (mode, xop0, strict)));
-+ }
++ return ((avr32_address_register_rtx_p (xop0, strict)
++ && avr32_legitimate_index_p (mode, xop1, strict))
++ || (avr32_address_register_rtx_p (xop1, strict)
++ && avr32_legitimate_index_p (mode, xop0, strict)));
++ }
+ default:
+ break;
+ }
+
-+ return FALSE;
-+ }
++ return FALSE;
++}
+
+
+int
++avr32_const_ok_for_move (HOST_WIDE_INT c)
++{
++ if ( TARGET_V2_INSNS )
++ return ( avr32_const_ok_for_constraint_p (c, 'K', "Ks21")
++ /* movh instruction */
++ || avr32_hi16_immediate_operand (GEN_INT(c), VOIDmode) );
++ else
++ return avr32_const_ok_for_constraint_p (c, 'K', "Ks21");
++}
++
++int
+avr32_const_double_immediate (rtx value)
-+ {
-+ HOST_WIDE_INT hi, lo;
++{
++ HOST_WIDE_INT hi, lo;
+
-+ if (GET_CODE (value) != CONST_DOUBLE)
-+ return FALSE;
++ if (GET_CODE (value) != CONST_DOUBLE)
++ return FALSE;
+
-+ if (SCALAR_FLOAT_MODE_P (GET_MODE (value)))
-+ {
-+ HOST_WIDE_INT target_float[2];
-+ hi = lo = 0;
-+ real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (value),
-+ GET_MODE (value));
-+ lo = target_float[0];
-+ hi = target_float[1];
-+ }
-+ else
-+ {
-+ hi = CONST_DOUBLE_HIGH (value);
-+ lo = CONST_DOUBLE_LOW (value);
-+ }
++ if (SCALAR_FLOAT_MODE_P (GET_MODE (value)))
++ {
++ HOST_WIDE_INT target_float[2];
++ hi = lo = 0;
++ real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (value),
++ GET_MODE (value));
++ lo = target_float[0];
++ hi = target_float[1];
++ }
++ else
++ {
++ hi = CONST_DOUBLE_HIGH (value);
++ lo = CONST_DOUBLE_LOW (value);
++ }
+
-+ if (avr32_const_ok_for_constraint_p (lo, 'K', "Ks21")
-+ && (GET_MODE (value) == SFmode
-+ || avr32_const_ok_for_constraint_p (hi, 'K', "Ks21")))
-+ {
-+ return TRUE;
-+ }
++ if (avr32_const_ok_for_constraint_p (lo, 'K', "Ks21")
++ && (GET_MODE (value) == SFmode
++ || avr32_const_ok_for_constraint_p (hi, 'K', "Ks21")))
++ {
++ return TRUE;
++ }
+
-+ return FALSE;
-+ }
++ return FALSE;
++}
+
+
+int
+avr32_legitimate_constant_p (rtx x)
-+ {
-+ switch (GET_CODE (x))
++{
++ switch (GET_CODE (x))
+ {
+ case CONST_INT:
+ /* Check if we should put large immediate into constant pool
+ if (!avr32_imm_in_const_pool)
+ return 1;
+
-+ return avr32_const_ok_for_constraint_p (INTVAL (x), 'K', "Ks21");
++ return avr32_const_ok_for_move (INTVAL (x));
+ case CONST_DOUBLE:
+ /* Check if we should put large immediate into constant pool
+ or load them directly with mov/orh.*/
+ return 1;
+
+ if (GET_MODE (x) == SFmode
-+ || GET_MODE (x) == DFmode || GET_MODE (x) == DImode)
-+ return avr32_const_double_immediate (x);
++ || GET_MODE (x) == DFmode || GET_MODE (x) == DImode)
++ return avr32_const_double_immediate (x);
+ else
-+ return 0;
++ return 0;
+ case LABEL_REF:
-+ return flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS;
+ case SYMBOL_REF:
-+ return flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS;
++ return avr32_find_symbol (x) && (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS);
+ case CONST:
+ case HIGH:
+ case CONST_VECTOR:
+ debug_rtx (x);
+ return 1;
+ }
-+ }
++}
+
+
+/* Strip any special encoding from labels */
+ while (1)
+ {
+ switch (stripped[0])
-+ {
-+ case '#':
-+ stripped = strchr (name + 1, '#') + 1;
-+ break;
-+ case '*':
-+ stripped = &stripped[1];
-+ break;
-+ default:
-+ return stripped;
-+ }
++ {
++ case '#':
++ stripped = strchr (name + 1, '#') + 1;
++ break;
++ case '*':
++ stripped = &stripped[1];
++ break;
++ default:
++ return stripped;
++ }
+ }
+}
+
+
+ machine->minipool_label_head = 0;
+ machine->minipool_label_tail = 0;
++ machine->ifcvt_after_reload = 0;
+ return machine;
+}
+
+void
+avr32_init_expanders (void)
-+ {
-+ /* Arrange to initialize and mark the machine per-function status. */
-+ init_machine_status = avr32_init_machine_status;
-+ }
++{
++ /* Arrange to initialize and mark the machine per-function status. */
++ init_machine_status = avr32_init_machine_status;
++}
+
+
+/* Return an RTX indicating where the return address to the
+
+rtx
+avr32_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
-+ {
-+ if (count != 0)
-+ return NULL_RTX;
++{
++ if (count != 0)
++ return NULL_RTX;
+
-+ return get_hard_reg_initial_val (Pmode, LR_REGNUM);
-+ }
++ return get_hard_reg_initial_val (Pmode, LR_REGNUM);
++}
+
+
+void
+avr32_encode_section_info (tree decl, rtx rtl, int first)
-+ {
++{
++ default_encode_section_info(decl, rtl, first);
+
-+ if (first && DECL_P (decl))
-+ {
-+ /* Set SYMBOL_REG_FLAG for local functions */
-+ if (!TREE_PUBLIC (decl) && TREE_CODE (decl) == FUNCTION_DECL)
-+ {
-+ if ((*targetm.binds_local_p) (decl))
-+ {
-+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
-+ }
-+ }
-+ }
++ if ( TREE_CODE (decl) == VAR_DECL
++ && (GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
++ && (lookup_attribute ("rmw_addressable", DECL_ATTRIBUTES (decl))
++ || TARGET_RMW_ADDRESSABLE_DATA) ){
++ if ( !TARGET_RMW || flag_pic )
++ return;
++ SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT);
+ }
++}
++
++void
++avr32_asm_output_label (FILE * stream, const char *name)
++{
++ name = avr32_strip_name_encoding (name);
++
++ /* Print the label. */
++ assemble_name (stream, name);
++ fprintf (stream, ":\n");
++}
++
+
+
+void
-+avr32_asm_output_ascii (FILE * stream, char *ptr, int len)
-+ {
-+ int i, i_new = 0;
-+ char *new_ptr = xmalloc (4 * len);
-+ if (new_ptr == NULL)
-+ internal_error ("Out of memory.");
-+
-+ for (i = 0; i < len; i++)
-+ {
-+ if (ptr[i] == '\n')
-+ {
-+ new_ptr[i_new++] = '\\';
-+ new_ptr[i_new++] = '0';
-+ new_ptr[i_new++] = '1';
-+ new_ptr[i_new++] = '2';
-+ }
-+ else if (ptr[i] == '\"')
-+ {
-+ new_ptr[i_new++] = '\\';
-+ new_ptr[i_new++] = '\"';
-+ }
-+ else if (ptr[i] == '\\')
-+ {
-+ new_ptr[i_new++] = '\\';
-+ new_ptr[i_new++] = '\\';
-+ }
-+ else if (ptr[i] == '\0' && i + 1 < len)
-+ {
-+ new_ptr[i_new++] = '\\';
-+ new_ptr[i_new++] = '0';
-+ }
-+ else
-+ {
-+ new_ptr[i_new++] = ptr[i];
-+ }
-+ }
-+
-+ /* Terminate new_ptr. */
-+ new_ptr[i_new] = '\0';
-+ fprintf (stream, "\t.ascii\t\"%s\"\n", new_ptr);
-+ free (new_ptr);
-+ }
-+
-+
-+void
-+avr32_asm_output_label (FILE * stream, const char *name)
-+ {
-+ name = avr32_strip_name_encoding (name);
-+
-+ /* Print the label. */
-+ assemble_name (stream, name);
-+ fprintf (stream, ":\n");
-+ }
-+
-+
-+
-+void
-+avr32_asm_weaken_label (FILE * stream, const char *name)
-+ {
-+ fprintf (stream, "\t.weak ");
-+ assemble_name (stream, name);
-+ fprintf (stream, "\n");
-+ }
++avr32_asm_weaken_label (FILE * stream, const char *name)
++{
++ fprintf (stream, "\t.weak ");
++ assemble_name (stream, name);
++ fprintf (stream, "\n");
++}
+
+/*
+ Checks if a labelref is equal to a reserved word in the assembler. If it is,
+ insert a '_' before the label name.
-+ */
++*/
+void
+avr32_asm_output_labelref (FILE * stream, const char *name)
-+ {
-+ int verbatim = FALSE;
-+ const char *stripped = name;
-+ int strip_finished = FALSE;
++{
++ int verbatim = FALSE;
++ const char *stripped = name;
++ int strip_finished = FALSE;
+
-+ while (!strip_finished)
-+ {
-+ switch (stripped[0])
-+ {
-+ case '#':
-+ stripped = strchr (name + 1, '#') + 1;
-+ break;
-+ case '*':
-+ stripped = &stripped[1];
-+ verbatim = TRUE;
-+ break;
-+ default:
-+ strip_finished = TRUE;
-+ break;
-+ }
-+ }
++ while (!strip_finished)
++ {
++ switch (stripped[0])
++ {
++ case '#':
++ stripped = strchr (name + 1, '#') + 1;
++ break;
++ case '*':
++ stripped = &stripped[1];
++ verbatim = TRUE;
++ break;
++ default:
++ strip_finished = TRUE;
++ break;
++ }
++ }
+
-+ if (verbatim)
-+ fputs (stripped, stream);
-+ else
-+ asm_fprintf (stream, "%U%s", stripped);
-+ }
++ if (verbatim)
++ fputs (stripped, stream);
++ else
++ asm_fprintf (stream, "%U%s", stripped);
++}
+
+
+
+ Returns NULL_RTX if the compare is not redundant
+ or the new condition to use in the conditional
+ instruction if the compare is redundant.
-+ */
++*/
+static rtx
+is_compare_redundant (rtx compare_exp, rtx next_cond)
-+ {
-+ int z_flag_valid = FALSE;
-+ int n_flag_valid = FALSE;
-+ rtx new_cond;
++{
++ int z_flag_valid = FALSE;
++ int n_flag_valid = FALSE;
++ rtx new_cond;
+
-+ if (GET_CODE (compare_exp) != COMPARE)
-+ return NULL_RTX;
++ if (GET_CODE (compare_exp) != COMPARE
++ && GET_CODE (compare_exp) != AND)
++ return NULL_RTX;
+
+
-+ if (rtx_equal_p (cc_prev_status.mdep.value, compare_exp))
-+ {
-+ /* cc0 already contains the correct comparison -> delete cmp insn */
-+ return next_cond;
-+ }
++ if (rtx_equal_p (cc_prev_status.mdep.value, compare_exp))
++ {
++ /* cc0 already contains the correct comparison -> delete cmp insn */
++ return next_cond;
++ }
+
-+ if (GET_MODE (compare_exp) != SImode)
-+ return NULL_RTX;
++ if (GET_MODE (compare_exp) != SImode)
++ return NULL_RTX;
+
-+ switch (cc_prev_status.mdep.flags)
++ switch (cc_prev_status.mdep.flags)
+ {
+ case CC_SET_VNCZ:
+ case CC_SET_NCZ:
+ z_flag_valid = TRUE;
+ }
+
-+ if (cc_prev_status.mdep.value
-+ && REG_P (XEXP (compare_exp, 0))
-+ && REGNO (XEXP (compare_exp, 0)) == REGNO (cc_prev_status.mdep.value)
-+ && GET_CODE (XEXP (compare_exp, 1)) == CONST_INT
-+ && next_cond != NULL_RTX)
-+ {
-+ if (INTVAL (XEXP (compare_exp, 1)) == 0
-+ && z_flag_valid
-+ && (GET_CODE (next_cond) == EQ || GET_CODE (next_cond) == NE))
-+ /* We can skip comparison Z flag is already reflecting ops[0] */
-+ return next_cond;
-+ else if (n_flag_valid
-+ && ((INTVAL (XEXP (compare_exp, 1)) == 0
-+ && (GET_CODE (next_cond) == GE
-+ || GET_CODE (next_cond) == LT))
-+ || (INTVAL (XEXP (compare_exp, 1)) == -1
-+ && (GET_CODE (next_cond) == GT
-+ || GET_CODE (next_cond) == LE))))
-+ {
-+ /* We can skip comparison N flag is already reflecting ops[0],
++ if (cc_prev_status.mdep.value
++ && GET_CODE (compare_exp) == COMPARE
++ && REG_P (XEXP (compare_exp, 0))
++ && REGNO (XEXP (compare_exp, 0)) == REGNO (cc_prev_status.mdep.value)
++ && GET_CODE (XEXP (compare_exp, 1)) == CONST_INT
++ && next_cond != NULL_RTX)
++ {
++ if (INTVAL (XEXP (compare_exp, 1)) == 0
++ && z_flag_valid
++ && (GET_CODE (next_cond) == EQ || GET_CODE (next_cond) == NE))
++ /* We can skip the comparison; the Z flag already reflects ops[0] */
++ return next_cond;
++ else if (n_flag_valid
++ && ((INTVAL (XEXP (compare_exp, 1)) == 0
++ && (GET_CODE (next_cond) == GE
++ || GET_CODE (next_cond) == LT))
++ || (INTVAL (XEXP (compare_exp, 1)) == -1
++ && (GET_CODE (next_cond) == GT
++ || GET_CODE (next_cond) == LE))))
++ {
++ /* We can skip the comparison; the N flag already reflects ops[0],
+ which means that we can use the mi/pl conditions to check if
+ ops[0] is GE or LT 0. */
-+ if ((GET_CODE (next_cond) == GE) || (GET_CODE (next_cond) == GT))
-+ new_cond =
-+ gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx),
-+ UNSPEC_COND_PL);
-+ else
-+ new_cond =
-+ gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx),
-+ UNSPEC_COND_MI);
-+ return new_cond;
-+ }
-+ }
-+ return NULL_RTX;
-+ }
++ if ((GET_CODE (next_cond) == GE) || (GET_CODE (next_cond) == GT))
++ new_cond =
++ gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx),
++ UNSPEC_COND_PL);
++ else
++ new_cond =
++ gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx),
++ UNSPEC_COND_MI);
++ return new_cond;
++ }
++ }
++ return NULL_RTX;
++}
+
+/* Updates cc_status. */
+void
+avr32_notice_update_cc (rtx exp, rtx insn)
-+ {
-+ switch (get_attr_cc (insn))
++{
++ enum attr_cc attr_cc = get_attr_cc (insn);
++
++ if ( attr_cc == CC_SET_Z_IF_NOT_V2 )
++ if (TARGET_V2_INSNS)
++ attr_cc = CC_NONE;
++ else
++ attr_cc = CC_SET_Z;
++
++ switch (attr_cc)
+ {
+ case CC_CALL_SET:
+ CC_STATUS_INIT;
+ FPCC_STATUS_INIT;
+ /* Check if the function call returns a value in r12 */
+ if (REG_P (recog_data.operand[0])
-+ && REGNO (recog_data.operand[0]) == RETVAL_REGNUM)
-+ {
-+ cc_status.flags = 0;
-+ cc_status.mdep.value =
-+ gen_rtx_COMPARE (SImode, recog_data.operand[0], const0_rtx);
-+ cc_status.mdep.flags = CC_SET_VNCZ;
++ && REGNO (recog_data.operand[0]) == RETVAL_REGNUM)
++ {
++ cc_status.flags = 0;
++ cc_status.mdep.value =
++ gen_rtx_COMPARE (SImode, recog_data.operand[0], const0_rtx);
++ cc_status.mdep.flags = CC_SET_VNCZ;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
+
-+ }
++ }
+ break;
+ case CC_COMPARE:
-+ /* Check that compare will not be optimized away if so nothing should
-+ be done */
-+ if (is_compare_redundant (SET_SRC (exp), get_next_insn_cond (insn)) ==
-+ NULL_RTX)
-+ {
++ {
++ /* Check that compare will not be optimized away if so nothing should
++ be done */
++ rtx compare_exp = SET_SRC (exp);
++ /* Check if we have a tst expression. If so convert it to a
++ compare with 0. */
++ if ( REG_P (SET_SRC (exp)) )
++ compare_exp = gen_rtx_COMPARE (GET_MODE (SET_SRC (exp)),
++ SET_SRC (exp),
++ const0_rtx);
++
++ if (is_compare_redundant (compare_exp, get_next_insn_cond (insn)) ==
++ NULL_RTX)
++ {
+
-+ /* Reset the nonstandard flag */
-+ CC_STATUS_INIT;
-+ cc_status.flags = 0;
-+ cc_status.mdep.value = SET_SRC (exp);
-+ cc_status.mdep.flags = CC_SET_VNCZ;
-+ }
++ /* Reset the nonstandard flag */
++ CC_STATUS_INIT;
++ cc_status.flags = 0;
++ cc_status.mdep.value = compare_exp;
++ cc_status.mdep.flags = CC_SET_VNCZ;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
++ }
++ }
+ break;
+ case CC_CMP_COND_INSN:
-+ {
-+ /* Conditional insn that emit the compare itself. */
-+ rtx cmp = gen_rtx_COMPARE (GET_MODE (recog_data.operand[4]),
-+ recog_data.operand[4],
-+ recog_data.operand[5]);
++ {
++ /* Conditional insn that emit the compare itself. */
++ rtx cmp;
++ rtx cmp_op0, cmp_op1;
++ rtx cond;
++ rtx dest;
++ rtx next_insn = next_nonnote_insn (insn);
++
++ if ( GET_CODE (exp) == COND_EXEC )
++ {
++ cmp_op0 = XEXP (COND_EXEC_TEST (exp), 0);
++ cmp_op1 = XEXP (COND_EXEC_TEST (exp), 1);
++ cond = COND_EXEC_TEST (exp);
++ dest = SET_DEST (COND_EXEC_CODE (exp));
++ }
++ else
++ {
++ /* If then else conditional. compare operands are in operands
++ 4 and 5. */
++ cmp_op0 = recog_data.operand[4];
++ cmp_op1 = recog_data.operand[5];
++ cond = recog_data.operand[1];
++ dest = SET_DEST (exp);
++ }
+
-+ if (is_compare_redundant (cmp, recog_data.operand[1]) == NULL_RTX)
-+ {
++ if ( GET_CODE (cmp_op0) == AND )
++ cmp = cmp_op0;
++ else
++ cmp = gen_rtx_COMPARE (GET_MODE (cmp_op0),
++ cmp_op0,
++ cmp_op1);
++
++ /* Check if the conditional insns updates a register present
++ in the comparison, if so then we must reset the cc_status. */
++ if (REG_P (dest)
++ && (reg_mentioned_p (dest, cmp_op0)
++ || reg_mentioned_p (dest, cmp_op1))
++ && GET_CODE (exp) != COND_EXEC )
++ {
++ CC_STATUS_INIT;
++ }
++ else if (is_compare_redundant (cmp, cond) == NULL_RTX)
++ {
++ /* Reset the nonstandard flag */
++ CC_STATUS_INIT;
++ if ( GET_CODE (cmp_op0) == AND )
++ {
++ cc_status.flags = CC_INVERTED;
++ cc_status.mdep.flags = CC_SET_Z;
++ }
++ else
++ {
++ cc_status.flags = 0;
++ cc_status.mdep.flags = CC_SET_VNCZ;
++ }
++ cc_status.mdep.value = cmp;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
++ }
+
-+ /* Reset the nonstandard flag */
-+ CC_STATUS_INIT;
-+ cc_status.flags = 0;
-+ cc_status.mdep.value = cmp;
-+ cc_status.mdep.flags = CC_SET_VNCZ;
-+ }
-+ }
-+ break;
++
++ /* Check if we have a COND_EXEC insn which updates one
++ of the registers in the compare status. */
++ if (REG_P (dest)
++ && (reg_mentioned_p (dest, cmp_op0)
++ || reg_mentioned_p (dest, cmp_op1))
++ && GET_CODE (exp) == COND_EXEC )
++ cc_status.mdep.cond_exec_cmp_clobbered = 1;
++
++ if ( cc_status.mdep.cond_exec_cmp_clobbered
++ && GET_CODE (exp) == COND_EXEC
++ && next_insn != NULL
++ && INSN_P (next_insn)
++ && !(GET_CODE (PATTERN (next_insn)) == COND_EXEC
++ && rtx_equal_p (XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 0), cmp_op0)
++ && rtx_equal_p (XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 1), cmp_op1)
++ && (GET_CODE (COND_EXEC_TEST (PATTERN (next_insn))) == GET_CODE (cond)
++ || GET_CODE (COND_EXEC_TEST (PATTERN (next_insn))) == reverse_condition (GET_CODE (cond)))) )
++ {
++ /* We have a sequence of conditional insns where the compare status has been clobbered
++ since the compare no longer reflects the content of the values to compare. */
++ CC_STATUS_INIT;
++ cc_status.mdep.cond_exec_cmp_clobbered = 1;
++ }
++
++ }
++ break;
+ case CC_FPCOMPARE:
+ /* Check that floating-point compare will not be optimized away if so
+ nothing should be done */
+ if (!rtx_equal_p (cc_prev_status.mdep.fpvalue, SET_SRC (exp)))
-+ {
-+ /* cc0 already contains the correct comparison -> delete cmp insn */
-+ /* Reset the nonstandard flag */
-+ cc_status.mdep.fpvalue = SET_SRC (exp);
-+ cc_status.mdep.fpflags = CC_SET_CZ;
-+ }
++ {
++ /* cc0 already contains the correct comparison -> delete cmp insn */
++ /* Reset the nonstandard flag */
++ cc_status.mdep.fpvalue = SET_SRC (exp);
++ cc_status.mdep.fpflags = CC_SET_CZ;
++ }
+ break;
+ case CC_FROM_FPCC:
+ /* Flags are updated with flags from Floating-point coprocessor, set
+ cc_status.flags = CC_INVERTED;
+ cc_status.mdep.value = SET_SRC (exp);
+ cc_status.mdep.flags = CC_SET_Z;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
+ break;
+ case CC_NONE:
+ /* Insn does not affect CC at all. Check if the instruction updates
+ some of the register currently reflected in cc0 */
+
+ if ((GET_CODE (exp) == SET)
-+ && (cc_status.value1 || cc_status.value2 || cc_status.mdep.value)
-+ && (reg_mentioned_p (SET_DEST (exp), cc_status.value1)
-+ || reg_mentioned_p (SET_DEST (exp), cc_status.value2)
-+ || reg_mentioned_p (SET_DEST (exp), cc_status.mdep.value)))
-+ {
-+ CC_STATUS_INIT;
-+ }
++ && (cc_status.value1 || cc_status.value2 || cc_status.mdep.value)
++ && (reg_mentioned_p (SET_DEST (exp), cc_status.value1)
++ || reg_mentioned_p (SET_DEST (exp), cc_status.value2)
++ || reg_mentioned_p (SET_DEST (exp), cc_status.mdep.value)))
++ {
++ CC_STATUS_INIT;
++ }
+
+ /* If this is a parallel we must step through each of the parallel
+ expressions */
+ if (GET_CODE (exp) == PARALLEL)
-+ {
-+ int i;
-+ for (i = 0; i < XVECLEN (exp, 0); ++i)
-+ {
-+ rtx vec_exp = XVECEXP (exp, 0, i);
-+ if ((GET_CODE (vec_exp) == SET)
-+ && (cc_status.value1 || cc_status.value2
-+ || cc_status.mdep.value)
-+ && (reg_mentioned_p (SET_DEST (vec_exp), cc_status.value1)
-+ || reg_mentioned_p (SET_DEST (vec_exp),
-+ cc_status.value2)
-+ || reg_mentioned_p (SET_DEST (vec_exp),
-+ cc_status.mdep.value)))
-+ {
-+ CC_STATUS_INIT;
-+ }
-+ }
-+ }
++ {
++ int i;
++ for (i = 0; i < XVECLEN (exp, 0); ++i)
++ {
++ rtx vec_exp = XVECEXP (exp, 0, i);
++ if ((GET_CODE (vec_exp) == SET)
++ && (cc_status.value1 || cc_status.value2
++ || cc_status.mdep.value)
++ && (reg_mentioned_p (SET_DEST (vec_exp), cc_status.value1)
++ || reg_mentioned_p (SET_DEST (vec_exp),
++ cc_status.value2)
++ || reg_mentioned_p (SET_DEST (vec_exp),
++ cc_status.mdep.value)))
++ {
++ CC_STATUS_INIT;
++ }
++ }
++ }
+
+ /* Check if we have memory opartions with post_inc or pre_dec on the
+ register currently reflected in cc0 */
+ if (GET_CODE (exp) == SET
-+ && GET_CODE (SET_SRC (exp)) == MEM
-+ && (GET_CODE (XEXP (SET_SRC (exp), 0)) == POST_INC
-+ || GET_CODE (XEXP (SET_SRC (exp), 0)) == PRE_DEC)
-+ &&
-+ (reg_mentioned_p
-+ (XEXP (XEXP (SET_SRC (exp), 0), 0), cc_status.value1)
-+ || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
-+ cc_status.value2)
-+ || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
-+ cc_status.mdep.value)))
-+ CC_STATUS_INIT;
++ && GET_CODE (SET_SRC (exp)) == MEM
++ && (GET_CODE (XEXP (SET_SRC (exp), 0)) == POST_INC
++ || GET_CODE (XEXP (SET_SRC (exp), 0)) == PRE_DEC)
++ &&
++ (reg_mentioned_p
++ (XEXP (XEXP (SET_SRC (exp), 0), 0), cc_status.value1)
++ || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
++ cc_status.value2)
++ || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
++ cc_status.mdep.value)))
++ CC_STATUS_INIT;
+
+ if (GET_CODE (exp) == SET
-+ && GET_CODE (SET_DEST (exp)) == MEM
-+ && (GET_CODE (XEXP (SET_DEST (exp), 0)) == POST_INC
-+ || GET_CODE (XEXP (SET_DEST (exp), 0)) == PRE_DEC)
-+ &&
-+ (reg_mentioned_p
-+ (XEXP (XEXP (SET_DEST (exp), 0), 0), cc_status.value1)
-+ || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
-+ cc_status.value2)
-+ || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
-+ cc_status.mdep.value)))
-+ CC_STATUS_INIT;
++ && GET_CODE (SET_DEST (exp)) == MEM
++ && (GET_CODE (XEXP (SET_DEST (exp), 0)) == POST_INC
++ || GET_CODE (XEXP (SET_DEST (exp), 0)) == PRE_DEC)
++ &&
++ (reg_mentioned_p
++ (XEXP (XEXP (SET_DEST (exp), 0), 0), cc_status.value1)
++ || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
++ cc_status.value2)
++ || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
++ cc_status.mdep.value)))
++ CC_STATUS_INIT;
+ break;
+
+ case CC_SET_VNCZ:
+ CC_STATUS_INIT;
+ cc_status.mdep.value = recog_data.operand[0];
+ cc_status.mdep.flags = CC_SET_VNCZ;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
+ break;
+
+ case CC_SET_NCZ:
+ CC_STATUS_INIT;
+ cc_status.mdep.value = recog_data.operand[0];
+ cc_status.mdep.flags = CC_SET_NCZ;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
+ break;
+
+ case CC_SET_CZ:
+ CC_STATUS_INIT;
+ cc_status.mdep.value = recog_data.operand[0];
+ cc_status.mdep.flags = CC_SET_CZ;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
+ break;
+
+ case CC_SET_Z:
+ CC_STATUS_INIT;
+ cc_status.mdep.value = recog_data.operand[0];
+ cc_status.mdep.flags = CC_SET_Z;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
+ break;
+
+ case CC_CLOBBER:
+ default:
+ CC_STATUS_INIT;
+ }
-+ }
++}
+
+
+/*
+ Outputs to stdio stream stream the assembler syntax for an instruction
+ operand x. x is an RTL expression.
-+ */
++*/
+void
+avr32_print_operand (FILE * stream, rtx x, int code)
-+ {
-+ int error = 0;
++{
++ int error = 0;
++
++ if ( code == '?' )
++ {
++ /* Predicable instruction, print condition code */
++
++ /* If the insn should not be conditional then do nothing. */
++ if ( current_insn_predicate == NULL_RTX )
++ return;
++
++ /* Set x to the predicate to force printing
++ the condition later on. */
++ x = current_insn_predicate;
++
++ /* Reverse condition if using bld insn. */
++ if ( GET_CODE (XEXP(current_insn_predicate,0)) == AND )
++ x = reversed_condition (current_insn_predicate);
++ }
++ else if ( code == '!' )
++ {
++ /* Output compare for conditional insn if needed. */
++ rtx new_cond;
++ gcc_assert ( current_insn_predicate != NULL_RTX );
++ new_cond = avr32_output_cmp(current_insn_predicate,
++ GET_MODE(XEXP(current_insn_predicate,0)),
++ XEXP(current_insn_predicate,0),
++ XEXP(current_insn_predicate,1));
++
++ /* Check if the new condition is a special avr32 condition
++ specified using UNSPECs. If so we must handle it differently. */
++ if ( GET_CODE (new_cond) == UNSPEC )
++ {
++ current_insn_predicate =
++ gen_rtx_UNSPEC (CCmode,
++ gen_rtvec (2,
++ XEXP(current_insn_predicate,0),
++ XEXP(current_insn_predicate,1)),
++ XINT (new_cond, 1));
++ }
++ else
++ {
++ PUT_CODE(current_insn_predicate, GET_CODE(new_cond));
++ }
++ return;
++ }
+
-+ switch (GET_CODE (x))
++ switch (GET_CODE (x))
+ {
+ case UNSPEC:
+ switch (XINT (x, 1))
-+ {
-+ case UNSPEC_COND_PL:
-+ if (code == 'i')
-+ fputs ("mi", stream);
-+ else
-+ fputs ("pl", stream);
-+ break;
-+ case UNSPEC_COND_MI:
-+ if (code == 'i')
-+ fputs ("pl", stream);
-+ else
-+ fputs ("mi", stream);
-+ break;
-+ default:
-+ error = 1;
-+ }
++ {
++ case UNSPEC_COND_PL:
++ if (code == 'i')
++ fputs ("mi", stream);
++ else
++ fputs ("pl", stream);
++ break;
++ case UNSPEC_COND_MI:
++ if (code == 'i')
++ fputs ("pl", stream);
++ else
++ fputs ("mi", stream);
++ break;
++ default:
++ error = 1;
++ }
+ break;
+ case EQ:
+ if (code == 'i')
-+ fputs ("ne", stream);
++ fputs ("ne", stream);
+ else
-+ fputs ("eq", stream);
++ fputs ("eq", stream);
+ break;
+ case NE:
+ if (code == 'i')
-+ fputs ("eq", stream);
++ fputs ("eq", stream);
+ else
-+ fputs ("ne", stream);
++ fputs ("ne", stream);
+ break;
+ case GT:
+ if (code == 'i')
-+ fputs ("le", stream);
++ fputs ("le", stream);
+ else
-+ fputs ("gt", stream);
++ fputs ("gt", stream);
+ break;
+ case GTU:
+ if (code == 'i')
-+ fputs ("ls", stream);
++ fputs ("ls", stream);
+ else
-+ fputs ("hi", stream);
++ fputs ("hi", stream);
+ break;
+ case LT:
+ if (code == 'i')
-+ fputs ("ge", stream);
++ fputs ("ge", stream);
+ else
-+ fputs ("lt", stream);
++ fputs ("lt", stream);
+ break;
+ case LTU:
+ if (code == 'i')
-+ fputs ("hs", stream);
++ fputs ("hs", stream);
+ else
-+ fputs ("lo", stream);
++ fputs ("lo", stream);
+ break;
+ case GE:
+ if (code == 'i')
-+ fputs ("lt", stream);
++ fputs ("lt", stream);
+ else
-+ fputs ("ge", stream);
++ fputs ("ge", stream);
+ break;
+ case GEU:
+ if (code == 'i')
-+ fputs ("lo", stream);
++ fputs ("lo", stream);
+ else
-+ fputs ("hs", stream);
++ fputs ("hs", stream);
+ break;
+ case LE:
+ if (code == 'i')
-+ fputs ("gt", stream);
++ fputs ("gt", stream);
+ else
-+ fputs ("le", stream);
++ fputs ("le", stream);
+ break;
+ case LEU:
+ if (code == 'i')
-+ fputs ("hi", stream);
++ fputs ("hi", stream);
+ else
-+ fputs ("ls", stream);
++ fputs ("ls", stream);
+ break;
+ case CONST_INT:
-+ {
-+ HOST_WIDE_INT value = INTVAL (x);
++ {
++ HOST_WIDE_INT value = INTVAL (x);
+
-+ switch (code)
++ switch (code)
+ {
+ case 'm':
+ if ( HOST_BITS_PER_WIDE_INT > BITS_PER_WORD )
+ value = bitpos;
+ }
+ break;
++ case 'z':
++ {
++ /* Set to bit position of first bit cleared in immediate */
++ int i, bitpos = 32;
++ for (i = 0; i < 32; i++)
++ if (!(value & (1 << i)))
++ {
++ bitpos = i;
++ break;
++ }
++ value = bitpos;
++ }
++ break;
+ case 'r':
+ {
+ /* Reglist 8 */
+ char op[50];
+ op[0] = '\0';
-+
++
+ if (value & 0x01)
+ sprintf (op, "r0-r3");
+ if (value & 0x02)
+ strlen (op) ? sprintf (op, "%s, lr", op) : sprintf (op, "lr");
+ if (value & 0x80)
+ strlen (op) ? sprintf (op, "%s, pc", op) : sprintf (op, "pc");
-+
++
+ fputs (op, stream);
+ return;
+ }
+ int i;
+ reglist16_string[0] = '\0';
+
-+ for (i = 0; i < 16; ++i)
-+ {
-+ if (value & (1 << i))
-+ {
-+ strlen (reglist16_string) ? sprintf (reglist16_string,
-+ "%s, %s",
-+ reglist16_string,
-+ reg_names
-+ [INTERNAL_REGNUM
-+ (i)]) :
-+ sprintf (reglist16_string, "%s",
-+ reg_names[INTERNAL_REGNUM (i)]);
-+ }
-+ }
-+ fputs (reglist16_string, stream);
-+ return;
-+ }
-+ case 'C':
-+ {
-+ /* RegListCP8 */
-+ char reglist_string[100];
-+ avr32_make_fp_reglist_w (value, (char *) reglist_string);
-+ fputs (reglist_string, stream);
-+ return;
-+ }
-+ case 'D':
-+ {
-+ /* RegListCPD8 */
-+ char reglist_string[100];
-+ avr32_make_fp_reglist_d (value, (char *) reglist_string);
-+ fputs (reglist_string, stream);
-+ return;
-+ }
-+ case 'h':
-+ /* Print halfword part of word */
-+ fputs (value ? "b" : "t", stream);
-+ return;
-+ }
++ for (i = 0; i < 16; ++i)
++ {
++ if (value & (1 << i))
++ {
++ strlen (reglist16_string) ? sprintf (reglist16_string,
++ "%s, %s",
++ reglist16_string,
++ reg_names
++ [INTERNAL_REGNUM
++ (i)]) :
++ sprintf (reglist16_string, "%s",
++ reg_names[INTERNAL_REGNUM (i)]);
++ }
++ }
++ fputs (reglist16_string, stream);
++ return;
++ }
++ case 'C':
++ {
++ /* RegListCP8 */
++ char reglist_string[100];
++ avr32_make_fp_reglist_w (value, (char *) reglist_string);
++ fputs (reglist_string, stream);
++ return;
++ }
++ case 'D':
++ {
++ /* RegListCPD8 */
++ char reglist_string[100];
++ avr32_make_fp_reglist_d (value, (char *) reglist_string);
++ fputs (reglist_string, stream);
++ return;
++ }
++ case 'h':
++ /* Print halfword part of word */
++ fputs (value ? "b" : "t", stream);
++ return;
++ }
+
-+ /* Print Value */
-+ fprintf (stream, "%d", value);
-+ break;
-+ }
++ /* Print Value */
++ fprintf (stream, "%d", value);
++ break;
++ }
+ case CONST_DOUBLE:
-+ {
-+ HOST_WIDE_INT hi, lo;
-+ if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
-+ {
-+ HOST_WIDE_INT target_float[2];
-+ hi = lo = 0;
-+ real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (x),
-+ GET_MODE (x));
-+ /* For doubles the most significant part starts at index 0. */
-+ if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
-+ {
-+ hi = target_float[0];
-+ lo = target_float[1];
-+ }
-+ else
-+ {
-+ lo = target_float[0];
-+ }
-+ }
-+ else
-+ {
-+ hi = CONST_DOUBLE_HIGH (x);
-+ lo = CONST_DOUBLE_LOW (x);
-+ }
++ {
++ HOST_WIDE_INT hi, lo;
++ if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
++ {
++ HOST_WIDE_INT target_float[2];
++ hi = lo = 0;
++ real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (x),
++ GET_MODE (x));
++ /* For doubles the most significant part starts at index 0. */
++ if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
++ {
++ hi = target_float[0];
++ lo = target_float[1];
++ }
++ else
++ {
++ lo = target_float[0];
++ }
++ }
++ else
++ {
++ hi = CONST_DOUBLE_HIGH (x);
++ lo = CONST_DOUBLE_LOW (x);
++ }
+
-+ if (code == 'm')
-+ fprintf (stream, "%ld", hi);
-+ else
-+ fprintf (stream, "%ld", lo);
++ if (code == 'm')
++ fprintf (stream, "%ld", hi);
++ else
++ fprintf (stream, "%ld", lo);
+
-+ break;
-+ }
++ break;
++ }
+ case CONST:
+ output_addr_const (stream, XEXP (XEXP (x, 0), 0));
+ fprintf (stream, "+%ld", INTVAL (XEXP (XEXP (x, 0), 1)));
+ case REG:
+ /* Swap register name if the register is DImode or DFmode. */
+ if (GET_MODE (x) == DImode || GET_MODE (x) == DFmode)
-+ {
-+ /* Double register must have an even numbered address */
-+ gcc_assert (!(REGNO (x) % 2));
-+ if (code == 'm')
-+ fputs (reg_names[true_regnum (x)], stream);
-+ else
-+ fputs (reg_names[true_regnum (x) + 1], stream);
-+ }
++ {
++ /* Double register must have an even numbered address */
++ gcc_assert (!(REGNO (x) % 2));
++ if (code == 'm')
++ fputs (reg_names[true_regnum (x)], stream);
++ else
++ fputs (reg_names[true_regnum (x) + 1], stream);
++ }
+ else if (GET_MODE (x) == TImode)
-+ {
-+ switch (code)
-+ {
-+ case 'T':
-+ fputs (reg_names[true_regnum (x)], stream);
-+ break;
-+ case 'U':
-+ fputs (reg_names[true_regnum (x) + 1], stream);
-+ break;
-+ case 'L':
-+ fputs (reg_names[true_regnum (x) + 2], stream);
-+ break;
-+ case 'B':
-+ fputs (reg_names[true_regnum (x) + 3], stream);
-+ break;
-+ default:
-+ fprintf (stream, "%s, %s, %s, %s",
-+ reg_names[true_regnum (x) + 3],
-+ reg_names[true_regnum (x) + 2],
-+ reg_names[true_regnum (x) + 1],
-+ reg_names[true_regnum (x)]);
-+ break;
-+ }
-+ }
++ {
++ switch (code)
++ {
++ case 'T':
++ fputs (reg_names[true_regnum (x)], stream);
++ break;
++ case 'U':
++ fputs (reg_names[true_regnum (x) + 1], stream);
++ break;
++ case 'L':
++ fputs (reg_names[true_regnum (x) + 2], stream);
++ break;
++ case 'B':
++ fputs (reg_names[true_regnum (x) + 3], stream);
++ break;
++ default:
++ fprintf (stream, "%s, %s, %s, %s",
++ reg_names[true_regnum (x) + 3],
++ reg_names[true_regnum (x) + 2],
++ reg_names[true_regnum (x) + 1],
++ reg_names[true_regnum (x)]);
++ break;
++ }
++ }
+ else
-+ {
-+ fputs (reg_names[true_regnum (x)], stream);
-+ }
++ {
++ fputs (reg_names[true_regnum (x)], stream);
++ }
+ break;
+ case CODE_LABEL:
+ case LABEL_REF:
+ break;
+ case MEM:
+ switch (GET_CODE (XEXP (x, 0)))
-+ {
-+ case LABEL_REF:
-+ case SYMBOL_REF:
-+ output_addr_const (stream, XEXP (x, 0));
-+ break;
-+ case MEM:
-+ switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
-+ {
-+ case SYMBOL_REF:
-+ output_addr_const (stream, XEXP (XEXP (x, 0), 0));
-+ break;
-+ default:
-+ error = 1;
-+ break;
-+ }
-+ break;
-+ case REG:
-+ avr32_print_operand (stream, XEXP (x, 0), 0);
-+ if (code != 'p')
-+ fputs ("[0]", stream);
-+ break;
-+ case PRE_DEC:
-+ fputs ("--", stream);
-+ avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
-+ break;
-+ case POST_INC:
-+ avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
-+ fputs ("++", stream);
-+ break;
-+ case PLUS:
-+ {
-+ rtx op0 = XEXP (XEXP (x, 0), 0);
-+ rtx op1 = XEXP (XEXP (x, 0), 1);
-+ rtx base = NULL_RTX, offset = NULL_RTX;
-+
-+ if (avr32_address_register_rtx_p (op0, 1))
-+ {
-+ base = op0;
-+ offset = op1;
-+ }
-+ else if (avr32_address_register_rtx_p (op1, 1))
-+ {
-+ /* Operands are switched. */
-+ base = op1;
-+ offset = op0;
-+ }
++ {
++ case LABEL_REF:
++ case SYMBOL_REF:
++ output_addr_const (stream, XEXP (x, 0));
++ break;
++ case MEM:
++ switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
++ {
++ case SYMBOL_REF:
++ output_addr_const (stream, XEXP (XEXP (x, 0), 0));
++ break;
++ default:
++ error = 1;
++ break;
++ }
++ break;
++ case REG:
++ avr32_print_operand (stream, XEXP (x, 0), 0);
++ if (code != 'p')
++ fputs ("[0]", stream);
++ break;
++ case PRE_DEC:
++ fputs ("--", stream);
++ avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
++ break;
++ case POST_INC:
++ avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
++ fputs ("++", stream);
++ break;
++ case PLUS:
++ {
++ rtx op0 = XEXP (XEXP (x, 0), 0);
++ rtx op1 = XEXP (XEXP (x, 0), 1);
++ rtx base = NULL_RTX, offset = NULL_RTX;
++
++ if (avr32_address_register_rtx_p (op0, 1))
++ {
++ base = op0;
++ offset = op1;
++ }
++ else if (avr32_address_register_rtx_p (op1, 1))
++ {
++ /* Operands are switched. */
++ base = op1;
++ offset = op0;
++ }
+
-+ gcc_assert (base && offset
-+ && avr32_address_register_rtx_p (base, 1)
-+ && avr32_legitimate_index_p (GET_MODE (x), offset,
-+ 1));
++ gcc_assert (base && offset
++ && avr32_address_register_rtx_p (base, 1)
++ && avr32_legitimate_index_p (GET_MODE (x), offset,
++ 1));
+
-+ avr32_print_operand (stream, base, 0);
-+ fputs ("[", stream);
-+ avr32_print_operand (stream, offset, 0);
-+ fputs ("]", stream);
-+ break;
-+ }
-+ case CONST:
-+ output_addr_const (stream, XEXP (XEXP (XEXP (x, 0), 0), 0));
-+ fprintf (stream, " + %ld",
-+ INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)));
-+ break;
-+ default:
-+ error = 1;
-+ }
++ avr32_print_operand (stream, base, 0);
++ fputs ("[", stream);
++ avr32_print_operand (stream, offset, 0);
++ fputs ("]", stream);
++ break;
++ }
++ case CONST:
++ output_addr_const (stream, XEXP (XEXP (XEXP (x, 0), 0), 0));
++ fprintf (stream, " + %ld",
++ INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)));
++ break;
++ case CONST_INT:
++ avr32_print_operand (stream, XEXP (x, 0), 0);
++ break;
++ default:
++ error = 1;
++ }
+ break;
+ case MULT:
-+ {
-+ int value = INTVAL (XEXP (x, 1));
-+
-+ /* Convert immediate in multiplication into a shift immediate */
-+ switch (value)
-+ {
-+ case 2:
-+ value = 1;
-+ break;
-+ case 4:
-+ value = 2;
-+ break;
-+ case 8:
-+ value = 3;
-+ break;
-+ default:
-+ value = 0;
-+ }
-+ fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
-+ value);
-+ break;
-+ }
++ {
++ int value = INTVAL (XEXP (x, 1));
++
++ /* Convert immediate in multiplication into a shift immediate */
++ switch (value)
++ {
++ case 2:
++ value = 1;
++ break;
++ case 4:
++ value = 2;
++ break;
++ case 8:
++ value = 3;
++ break;
++ default:
++ value = 0;
++ }
++ fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
++ value);
++ break;
++ }
+ case ASHIFT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
-+ fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
-+ (int) INTVAL (XEXP (x, 1)));
++ fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
++ (int) INTVAL (XEXP (x, 1)));
+ else if (REG_P (XEXP (x, 1)))
-+ fprintf (stream, "%s << %s", reg_names[true_regnum (XEXP (x, 0))],
-+ reg_names[true_regnum (XEXP (x, 1))]);
++ fprintf (stream, "%s << %s", reg_names[true_regnum (XEXP (x, 0))],
++ reg_names[true_regnum (XEXP (x, 1))]);
+ else
-+ {
-+ error = 1;
-+ }
++ {
++ error = 1;
++ }
+ break;
+ case LSHIFTRT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
-+ fprintf (stream, "%s >> %i", reg_names[true_regnum (XEXP (x, 0))],
-+ (int) INTVAL (XEXP (x, 1)));
++ fprintf (stream, "%s >> %i", reg_names[true_regnum (XEXP (x, 0))],
++ (int) INTVAL (XEXP (x, 1)));
+ else if (REG_P (XEXP (x, 1)))
-+ fprintf (stream, "%s >> %s", reg_names[true_regnum (XEXP (x, 0))],
-+ reg_names[true_regnum (XEXP (x, 1))]);
++ fprintf (stream, "%s >> %s", reg_names[true_regnum (XEXP (x, 0))],
++ reg_names[true_regnum (XEXP (x, 1))]);
+ else
-+ {
-+ error = 1;
-+ }
++ {
++ error = 1;
++ }
+ fprintf (stream, ">>");
+ break;
+ case PARALLEL:
-+ {
-+ /* Load store multiple */
-+ int i;
-+ int count = XVECLEN (x, 0);
-+ int reglist16 = 0;
-+ char reglist16_string[100];
++ {
++ /* Load store multiple */
++ int i;
++ int count = XVECLEN (x, 0);
++ int reglist16 = 0;
++ char reglist16_string[100];
++
++ for (i = 0; i < count; ++i)
++ {
++ rtx vec_elm = XVECEXP (x, 0, i);
++ if (GET_MODE (vec_elm) != SET)
++ {
++ debug_rtx (vec_elm);
++ internal_error ("Unknown element in parallel expression!");
++ }
++ if (GET_MODE (XEXP (vec_elm, 0)) == REG)
++ {
++ /* Load multiple */
++ reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 0)));
++ }
++ else
++ {
++ /* Store multiple */
++ reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 1)));
++ }
++ }
+
-+ for (i = 0; i < count; ++i)
-+ {
-+ rtx vec_elm = XVECEXP (x, 0, i);
-+ if (GET_MODE (vec_elm) != SET)
-+ {
-+ debug_rtx (vec_elm);
-+ internal_error ("Unknown element in parallel expression!");
-+ }
-+ if (GET_MODE (XEXP (vec_elm, 0)) == REG)
-+ {
-+ /* Load multiple */
-+ reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 0)));
-+ }
-+ else
-+ {
-+ /* Store multiple */
-+ reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 1)));
-+ }
-+ }
++ avr32_make_reglist16 (reglist16, reglist16_string);
++ fputs (reglist16_string, stream);
+
-+ avr32_make_reglist16 (reglist16, reglist16_string);
-+ fputs (reglist16_string, stream);
++ break;
++ }
+
-+ break;
-+ }
++ case PLUS:
++ {
++ rtx op0 = XEXP (x, 0);
++ rtx op1 = XEXP (x, 1);
++ rtx base = NULL_RTX, offset = NULL_RTX;
++
++ if (avr32_address_register_rtx_p (op0, 1))
++ {
++ base = op0;
++ offset = op1;
++ }
++ else if (avr32_address_register_rtx_p (op1, 1))
++ {
++ /* Operands are switched. */
++ base = op1;
++ offset = op0;
++ }
++
++ gcc_assert (base && offset
++ && avr32_address_register_rtx_p (base, 1)
++ && avr32_legitimate_index_p (GET_MODE (x), offset, 1));
++
++ avr32_print_operand (stream, base, 0);
++ fputs ("[", stream);
++ avr32_print_operand (stream, offset, 0);
++ fputs ("]", stream);
++ break;
++ }
+
+ default:
+ error = 1;
+ }
+
-+ if (error)
-+ {
-+ debug_rtx (x);
-+ internal_error ("Illegal expression for avr32_print_operand");
-+ }
-+ }
++ if (error)
++ {
++ debug_rtx (x);
++ internal_error ("Illegal expression for avr32_print_operand");
++ }
++}
+
+rtx
+avr32_get_note_reg_equiv (rtx insn)
-+ {
-+ rtx note;
++{
++ rtx note;
+
-+ note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
++ note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
+
-+ if (note != NULL_RTX)
-+ return XEXP (note, 0);
-+ else
-+ return NULL_RTX;
-+ }
++ if (note != NULL_RTX)
++ return XEXP (note, 0);
++ else
++ return NULL_RTX;
++}
+
+/*
+ Outputs to stdio stream stream the assembler syntax for an instruction
+ expression.
+
+ ToDo: fixme.
-+ */
++*/
+void
+avr32_print_operand_address (FILE * stream, rtx x)
-+ {
-+ fprintf (stream, "(%d) /* address */", REGNO (x));
-+ }
++{
++ fprintf (stream, "(%d) /* address */", REGNO (x));
++}
+
+/* Return true if _GLOBAL_OFFSET_TABLE_ symbol is mentioned. */
+bool
+avr32_got_mentioned_p (rtx addr)
-+ {
-+ if (GET_CODE (addr) == MEM)
-+ addr = XEXP (addr, 0);
-+ while (GET_CODE (addr) == CONST)
-+ addr = XEXP (addr, 0);
-+ if (GET_CODE (addr) == SYMBOL_REF)
-+ {
-+ return streq (XSTR (addr, 0), "_GLOBAL_OFFSET_TABLE_");
-+ }
-+ if (GET_CODE (addr) == PLUS || GET_CODE (addr) == MINUS)
-+ {
-+ bool l1, l2;
++{
++ if (GET_CODE (addr) == MEM)
++ addr = XEXP (addr, 0);
++ while (GET_CODE (addr) == CONST)
++ addr = XEXP (addr, 0);
++ if (GET_CODE (addr) == SYMBOL_REF)
++ {
++ return streq (XSTR (addr, 0), "_GLOBAL_OFFSET_TABLE_");
++ }
++ if (GET_CODE (addr) == PLUS || GET_CODE (addr) == MINUS)
++ {
++ bool l1, l2;
+
-+ l1 = avr32_got_mentioned_p (XEXP (addr, 0));
-+ l2 = avr32_got_mentioned_p (XEXP (addr, 1));
-+ return l1 || l2;
-+ }
-+ return false;
-+ }
++ l1 = avr32_got_mentioned_p (XEXP (addr, 0));
++ l2 = avr32_got_mentioned_p (XEXP (addr, 1));
++ return l1 || l2;
++ }
++ return false;
++}
+
+
+/* Find the symbol in an address expression. */
+
+rtx
+avr32_find_symbol (rtx addr)
-+ {
-+ if (GET_CODE (addr) == MEM)
-+ addr = XEXP (addr, 0);
++{
++ if (GET_CODE (addr) == MEM)
++ addr = XEXP (addr, 0);
+
-+ while (GET_CODE (addr) == CONST)
-+ addr = XEXP (addr, 0);
++ while (GET_CODE (addr) == CONST)
++ addr = XEXP (addr, 0);
+
-+ if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
-+ return addr;
-+ if (GET_CODE (addr) == PLUS)
-+ {
-+ rtx l1, l2;
-+
-+ l1 = avr32_find_symbol (XEXP (addr, 0));
-+ l2 = avr32_find_symbol (XEXP (addr, 1));
-+ if (l1 != NULL_RTX && l2 == NULL_RTX)
-+ return l1;
-+ else if (l1 == NULL_RTX && l2 != NULL_RTX)
-+ return l2;
-+ }
++ if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
++ return addr;
++ if (GET_CODE (addr) == PLUS)
++ {
++ rtx l1, l2;
++
++ l1 = avr32_find_symbol (XEXP (addr, 0));
++ l2 = avr32_find_symbol (XEXP (addr, 1));
++ if (l1 != NULL_RTX && l2 == NULL_RTX)
++ return l1;
++ else if (l1 == NULL_RTX && l2 != NULL_RTX)
++ return l2;
++ }
+
-+ return NULL_RTX;
-+ }
++ return NULL_RTX;
++}
+
+
+/* Routines for manipulation of the constant pool. */
+/* Fixes less than a word need padding out to a word boundary. */
+#define MINIPOOL_FIX_SIZE(mode, value) \
+ (IS_FORCE_MINIPOOL(value) ? 0 : \
-+ (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4))
++ (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4))
+
+#define IS_FORCE_MINIPOOL(x) \
+ (GET_CODE(x) == UNSPEC && \
-+ XINT(x, 1) == UNSPEC_FORCE_MINIPOOL)
++ XINT(x, 1) == UNSPEC_FORCE_MINIPOOL)
+
+static Mnode *minipool_vector_head;
+static Mnode *minipool_vector_tail;
+ of the TABLE or NULL_RTX. */
+static rtx
+is_jump_table (rtx insn)
-+ {
-+ rtx table;
-+
-+ if (GET_CODE (insn) == JUMP_INSN
-+ && JUMP_LABEL (insn) != NULL
-+ && ((table = next_real_insn (JUMP_LABEL (insn)))
-+ == next_real_insn (insn))
-+ && table != NULL
-+ && GET_CODE (table) == JUMP_INSN
-+ && (GET_CODE (PATTERN (table)) == ADDR_VEC
-+ || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
-+ return table;
-+
-+ return NULL_RTX;
-+ }
++{
++ rtx table;
++
++ if (GET_CODE (insn) == JUMP_INSN
++ && JUMP_LABEL (insn) != NULL
++ && ((table = next_real_insn (JUMP_LABEL (insn)))
++ == next_real_insn (insn))
++ && table != NULL
++ && GET_CODE (table) == JUMP_INSN
++ && (GET_CODE (PATTERN (table)) == ADDR_VEC
++ || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
++ return table;
++
++ return NULL_RTX;
++}
+
+static HOST_WIDE_INT
+get_jump_table_size (rtx insn)
-+ {
-+ /* ADDR_VECs only take room if read-only data does into the text section. */
-+ if (JUMP_TABLES_IN_TEXT_SECTION
++{
++ /* ADDR_VECs only take room if read-only data goes into the text section. */
++ if (JUMP_TABLES_IN_TEXT_SECTION
+#if !defined(READONLY_DATA_SECTION_ASM_OP)
-+ || 1
++ || 1
+#endif
+ )
-+ {
-+ rtx body = PATTERN (insn);
-+ int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
++ {
++ rtx body = PATTERN (insn);
++ int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
+
-+ return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
-+ }
++ return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
++ }
+
-+ return 0;
-+ }
++ return 0;
++}
+
+/* Move a minipool fix MP from its current location to before MAX_MP.
+ If MAX_MP is NULL, then MP doesn't need moving, but the addressing
+ constraints may need updating. */
+static Mnode *
+move_minipool_fix_forward_ref (Mnode * mp, Mnode * max_mp,
-+ HOST_WIDE_INT max_address)
-+ {
-+ /* This should never be true and the code below assumes these are
++ HOST_WIDE_INT max_address)
++{
++ /* This should never be true and the code below assumes these are
+ different. */
-+ if (mp == max_mp)
-+ abort ();
++ if (mp == max_mp)
++ abort ();
+
-+ if (max_mp == NULL)
-+ {
-+ if (max_address < mp->max_address)
-+ mp->max_address = max_address;
-+ }
++ if (max_mp == NULL)
++ {
++ if (max_address < mp->max_address)
++ mp->max_address = max_address;
++ }
++ else
++ {
++ if (max_address > max_mp->max_address - mp->fix_size)
++ mp->max_address = max_mp->max_address - mp->fix_size;
+ else
-+ {
-+ if (max_address > max_mp->max_address - mp->fix_size)
-+ mp->max_address = max_mp->max_address - mp->fix_size;
-+ else
-+ mp->max_address = max_address;
++ mp->max_address = max_address;
+
-+ /* Unlink MP from its current position. Since max_mp is non-null,
++ /* Unlink MP from its current position. Since max_mp is non-null,
+ mp->prev must be non-null. */
-+ mp->prev->next = mp->next;
-+ if (mp->next != NULL)
-+ mp->next->prev = mp->prev;
-+ else
-+ minipool_vector_tail = mp->prev;
-+
-+ /* Re-insert it before MAX_MP. */
-+ mp->next = max_mp;
-+ mp->prev = max_mp->prev;
-+ max_mp->prev = mp;
++ mp->prev->next = mp->next;
++ if (mp->next != NULL)
++ mp->next->prev = mp->prev;
++ else
++ minipool_vector_tail = mp->prev;
+
-+ if (mp->prev != NULL)
-+ mp->prev->next = mp;
-+ else
-+ minipool_vector_head = mp;
-+ }
++ /* Re-insert it before MAX_MP. */
++ mp->next = max_mp;
++ mp->prev = max_mp->prev;
++ max_mp->prev = mp;
+
-+ /* Save the new entry. */
-+ max_mp = mp;
++ if (mp->prev != NULL)
++ mp->prev->next = mp;
++ else
++ minipool_vector_head = mp;
++ }
+
-+ /* Scan over the preceding entries and adjust their addresses as required.
-+ */
-+ while (mp->prev != NULL
-+ && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
-+ {
-+ mp->prev->max_address = mp->max_address - mp->prev->fix_size;
-+ mp = mp->prev;
-+ }
++ /* Save the new entry. */
++ max_mp = mp;
+
-+ return max_mp;
++ /* Scan over the preceding entries and adjust their addresses as required.
++ */
++ while (mp->prev != NULL
++ && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
++ {
++ mp->prev->max_address = mp->max_address - mp->prev->fix_size;
++ mp = mp->prev;
+ }
+
++ return max_mp;
++}
++
+/* Add a constant to the minipool for a forward reference. Returns the
+ node added or NULL if the constant will not fit in this pool. */
+static Mnode *
+ for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
+ {
+ if (GET_CODE (fix->value) == GET_CODE (mp->value)
-+ && fix->mode == mp->mode
-+ && (GET_CODE (fix->value) != CODE_LABEL
-+ || (CODE_LABEL_NUMBER (fix->value)
-+ == CODE_LABEL_NUMBER (mp->value)))
-+ && rtx_equal_p (fix->value, mp->value))
-+ {
-+ /* More than one fix references this entry. */
-+ mp->refcount++;
-+ return move_minipool_fix_forward_ref (mp, max_mp, max_address);
-+ }
++ && fix->mode == mp->mode
++ && (GET_CODE (fix->value) != CODE_LABEL
++ || (CODE_LABEL_NUMBER (fix->value)
++ == CODE_LABEL_NUMBER (mp->value)))
++ && rtx_equal_p (fix->value, mp->value))
++ {
++ /* More than one fix references this entry. */
++ mp->refcount++;
++ return move_minipool_fix_forward_ref (mp, max_mp, max_address);
++ }
+
+ /* Note the insertion point if necessary. */
+ if (max_mp == NULL && mp->max_address > max_address)
-+ max_mp = mp;
++ max_mp = mp;
+
+ }
+
+ mp->prev = minipool_vector_tail;
+
+ if (mp->prev == NULL)
-+ {
-+ minipool_vector_head = mp;
-+ minipool_vector_label = gen_label_rtx ();
-+ }
++ {
++ minipool_vector_head = mp;
++ minipool_vector_label = gen_label_rtx ();
++ }
+ else
-+ mp->prev->next = mp;
++ mp->prev->next = mp;
+
+ minipool_vector_tail = mp;
+ }
+ else
+ {
+ if (max_address > max_mp->max_address - mp->fix_size)
-+ mp->max_address = max_mp->max_address - mp->fix_size;
++ mp->max_address = max_mp->max_address - mp->fix_size;
+ else
-+ mp->max_address = max_address;
++ mp->max_address = max_address;
+
+ mp->next = max_mp;
+ mp->prev = max_mp->prev;
+ max_mp->prev = mp;
+ if (mp->prev != NULL)
-+ mp->prev->next = mp;
++ mp->prev->next = mp;
+ else
-+ minipool_vector_head = mp;
++ minipool_vector_head = mp;
+ }
+
+ /* Save the new entry. */
+ /* Scan over the preceding entries and adjust their addresses as required.
+ */
+ while (mp->prev != NULL
-+ && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
++ && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
+ {
+ mp->prev->max_address = mp->max_address - mp->prev->fix_size;
+ mp = mp->prev;
+
+static Mnode *
+move_minipool_fix_backward_ref (Mnode * mp, Mnode * min_mp,
-+ HOST_WIDE_INT min_address)
-+ {
-+ HOST_WIDE_INT offset;
++ HOST_WIDE_INT min_address)
++{
++ HOST_WIDE_INT offset;
+
-+ /* This should never be true, and the code below assumes these are
++ /* This should never be true, and the code below assumes these are
+ different. */
-+ if (mp == min_mp)
-+ abort ();
++ if (mp == min_mp)
++ abort ();
+
-+ if (min_mp == NULL)
-+ {
-+ if (min_address > mp->min_address)
-+ mp->min_address = min_address;
-+ }
-+ else
-+ {
-+ /* We will adjust this below if it is too loose. */
-+ mp->min_address = min_address;
++ if (min_mp == NULL)
++ {
++ if (min_address > mp->min_address)
++ mp->min_address = min_address;
++ }
++ else
++ {
++ /* We will adjust this below if it is too loose. */
++ mp->min_address = min_address;
+
-+ /* Unlink MP from its current position. Since min_mp is non-null,
++ /* Unlink MP from its current position. Since min_mp is non-null,
+ mp->next must be non-null. */
-+ mp->next->prev = mp->prev;
-+ if (mp->prev != NULL)
-+ mp->prev->next = mp->next;
-+ else
-+ minipool_vector_head = mp->next;
-+
-+ /* Reinsert it after MIN_MP. */
-+ mp->prev = min_mp;
-+ mp->next = min_mp->next;
-+ min_mp->next = mp;
-+ if (mp->next != NULL)
-+ mp->next->prev = mp;
-+ else
-+ minipool_vector_tail = mp;
-+ }
++ mp->next->prev = mp->prev;
++ if (mp->prev != NULL)
++ mp->prev->next = mp->next;
++ else
++ minipool_vector_head = mp->next;
+
-+ min_mp = mp;
++ /* Reinsert it after MIN_MP. */
++ mp->prev = min_mp;
++ mp->next = min_mp->next;
++ min_mp->next = mp;
++ if (mp->next != NULL)
++ mp->next->prev = mp;
++ else
++ minipool_vector_tail = mp;
++ }
+
-+ offset = 0;
-+ for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
-+ {
-+ mp->offset = offset;
-+ if (mp->refcount > 0)
-+ offset += mp->fix_size;
++ min_mp = mp;
+
-+ if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
-+ mp->next->min_address = mp->min_address + mp->fix_size;
-+ }
++ offset = 0;
++ for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
++ {
++ mp->offset = offset;
++ if (mp->refcount > 0)
++ offset += mp->fix_size;
+
-+ return min_mp;
++ if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
++ mp->next->min_address = mp->min_address + mp->fix_size;
+ }
+
++ return min_mp;
++}
++
+/* Add a constant to the minipool for a backward reference. Returns the
+ node added or NULL if the constant will not fit in this pool.
+
+ range, then we don't try. This ensures that we can't fail later on. */
+ if (min_address >= minipool_barrier->address
+ || (minipool_vector_tail->min_address + fix->fix_size
-+ >= minipool_barrier->address))
++ >= minipool_barrier->address))
+ return NULL;
+
+ /* Scan the pool to see if a constant with the same value has already been
+ for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
+ {
+ if (GET_CODE (fix->value) == GET_CODE (mp->value)
-+ && fix->mode == mp->mode
-+ && (GET_CODE (fix->value) != CODE_LABEL
-+ || (CODE_LABEL_NUMBER (fix->value)
-+ == CODE_LABEL_NUMBER (mp->value)))
-+ && rtx_equal_p (fix->value, mp->value)
-+ /* Check that there is enough slack to move this entry to the end
++ && fix->mode == mp->mode
++ && (GET_CODE (fix->value) != CODE_LABEL
++ || (CODE_LABEL_NUMBER (fix->value)
++ == CODE_LABEL_NUMBER (mp->value)))
++ && rtx_equal_p (fix->value, mp->value)
++ /* Check that there is enough slack to move this entry to the end
+ of the table (this is conservative). */
-+ && (mp->max_address
-+ > (minipool_barrier->address
-+ + minipool_vector_tail->offset
-+ + minipool_vector_tail->fix_size)))
-+ {
-+ mp->refcount++;
-+ return move_minipool_fix_backward_ref (mp, min_mp, min_address);
-+ }
++ && (mp->max_address
++ > (minipool_barrier->address
++ + minipool_vector_tail->offset
++ + minipool_vector_tail->fix_size)))
++ {
++ mp->refcount++;
++ return move_minipool_fix_backward_ref (mp, min_mp, min_address);
++ }
+
+ if (min_mp != NULL)
-+ mp->min_address += fix->fix_size;
++ mp->min_address += fix->fix_size;
+ else
-+ {
-+ /* Note the insertion point if necessary. */
-+ if (mp->min_address < min_address)
-+ {
-+ min_mp = mp;
-+ }
-+ else if (mp->max_address
-+ < minipool_barrier->address + mp->offset + fix->fix_size)
-+ {
-+ /* Inserting before this entry would push the fix beyond its
++ {
++ /* Note the insertion point if necessary. */
++ if (mp->min_address < min_address)
++ {
++ min_mp = mp;
++ }
++ else if (mp->max_address
++ < minipool_barrier->address + mp->offset + fix->fix_size)
++ {
++ /* Inserting before this entry would push the fix beyond its
+ maximum address (which can happen if we have re-located a
+ forwards fix); force the new fix to come after it. */
-+ min_mp = mp;
-+ min_address = mp->min_address + fix->fix_size;
-+ }
-+ }
++ min_mp = mp;
++ min_address = mp->min_address + fix->fix_size;
++ }
++ }
+ }
+
+ /* We need to create a new entry. */
+ mp->next = minipool_vector_head;
+
+ if (mp->next == NULL)
-+ {
-+ minipool_vector_tail = mp;
-+ minipool_vector_label = gen_label_rtx ();
-+ }
++ {
++ minipool_vector_tail = mp;
++ minipool_vector_label = gen_label_rtx ();
++ }
+ else
-+ mp->next->prev = mp;
++ mp->next->prev = mp;
+
+ minipool_vector_head = mp;
+ }
+ min_mp->next = mp;
+
+ if (mp->next != NULL)
-+ mp->next->prev = mp;
++ mp->next->prev = mp;
+ else
-+ minipool_vector_tail = mp;
++ minipool_vector_tail = mp;
+ }
+
+ /* Save the new entry. */
+ while (mp->next != NULL)
+ {
+ if (mp->next->min_address < mp->min_address + mp->fix_size)
-+ mp->next->min_address = mp->min_address + mp->fix_size;
++ mp->next->min_address = mp->min_address + mp->fix_size;
+
+ if (mp->refcount)
-+ mp->next->offset = mp->offset + mp->fix_size;
++ mp->next->offset = mp->offset + mp->fix_size;
+ else
-+ mp->next->offset = mp->offset;
++ mp->next->offset = mp->offset;
+
+ mp = mp->next;
+ }
+
+static void
+assign_minipool_offsets (Mfix * barrier)
-+ {
-+ HOST_WIDE_INT offset = 0;
-+ Mnode *mp;
++{
++ HOST_WIDE_INT offset = 0;
++ Mnode *mp;
+
-+ minipool_barrier = barrier;
++ minipool_barrier = barrier;
+
-+ for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
-+ {
-+ mp->offset = offset;
++ for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
++ {
++ mp->offset = offset;
+
-+ if (mp->refcount > 0)
-+ offset += mp->fix_size;
-+ }
-+ }
++ if (mp->refcount > 0)
++ offset += mp->fix_size;
++ }
++}
+
+/* Print a symbolic form of X to the debug file, F. */
+static void
+avr32_print_value (FILE * f, rtx x)
-+ {
-+ switch (GET_CODE (x))
++{
++ switch (GET_CODE (x))
+ {
+ case CONST_INT:
+ fprintf (f, "0x%x", (int) INTVAL (x));
+ return;
+
+ case CONST_VECTOR:
-+ {
-+ int i;
-+
-+ fprintf (f, "<");
-+ for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
-+ {
-+ fprintf (f, "0x%x", (int) INTVAL (CONST_VECTOR_ELT (x, i)));
-+ if (i < (CONST_VECTOR_NUNITS (x) - 1))
-+ fputc (',', f);
-+ }
-+ fprintf (f, ">");
-+ }
-+ return;
++ {
++ int i;
++
++ fprintf (f, "<");
++ for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
++ {
++ fprintf (f, "0x%x", (int) INTVAL (CONST_VECTOR_ELT (x, i)));
++ if (i < (CONST_VECTOR_NUNITS (x) - 1))
++ fputc (',', f);
++ }
++ fprintf (f, ">");
++ }
++ return;
+
+ case CONST_STRING:
+ fprintf (f, "\"%s\"", XSTR (x, 0));
+ fprintf (f, "????");
+ return;
+ }
-+ }
++}
+
+int
+is_minipool_label (rtx label)
-+ {
-+ minipool_labels *cur_mp_label = cfun->machine->minipool_label_head;
-+
-+ if (GET_CODE (label) != CODE_LABEL)
-+ return FALSE;
++{
++ minipool_labels *cur_mp_label = cfun->machine->minipool_label_head;
+
-+ while (cur_mp_label)
-+ {
-+ if (CODE_LABEL_NUMBER (label)
-+ == CODE_LABEL_NUMBER (cur_mp_label->label))
-+ return TRUE;
-+ cur_mp_label = cur_mp_label->next;
-+ }
++ if (GET_CODE (label) != CODE_LABEL)
+ return FALSE;
-+ }
++
++ while (cur_mp_label)
++ {
++ if (CODE_LABEL_NUMBER (label)
++ == CODE_LABEL_NUMBER (cur_mp_label->label))
++ return TRUE;
++ cur_mp_label = cur_mp_label->next;
++ }
++ return FALSE;
++}
+
+static void
+new_minipool_label (rtx label)
-+ {
-+ if (!cfun->machine->minipool_label_head)
-+ {
-+ cfun->machine->minipool_label_head =
-+ ggc_alloc (sizeof (minipool_labels));
-+ cfun->machine->minipool_label_tail = cfun->machine->minipool_label_head;
-+ cfun->machine->minipool_label_head->label = label;
-+ cfun->machine->minipool_label_head->next = 0;
-+ cfun->machine->minipool_label_head->prev = 0;
-+ }
-+ else
-+ {
-+ cfun->machine->minipool_label_tail->next =
-+ ggc_alloc (sizeof (minipool_labels));
-+ cfun->machine->minipool_label_tail->next->label = label;
-+ cfun->machine->minipool_label_tail->next->next = 0;
-+ cfun->machine->minipool_label_tail->next->prev =
-+ cfun->machine->minipool_label_tail;
-+ cfun->machine->minipool_label_tail =
-+ cfun->machine->minipool_label_tail->next;
-+ }
-+ }
++{
++ if (!cfun->machine->minipool_label_head)
++ {
++ cfun->machine->minipool_label_head =
++ ggc_alloc (sizeof (minipool_labels));
++ cfun->machine->minipool_label_tail = cfun->machine->minipool_label_head;
++ cfun->machine->minipool_label_head->label = label;
++ cfun->machine->minipool_label_head->next = 0;
++ cfun->machine->minipool_label_head->prev = 0;
++ }
++ else
++ {
++ cfun->machine->minipool_label_tail->next =
++ ggc_alloc (sizeof (minipool_labels));
++ cfun->machine->minipool_label_tail->next->label = label;
++ cfun->machine->minipool_label_tail->next->next = 0;
++ cfun->machine->minipool_label_tail->next->prev =
++ cfun->machine->minipool_label_tail;
++ cfun->machine->minipool_label_tail =
++ cfun->machine->minipool_label_tail->next;
++ }
++}
+
+/* Output the literal table */
+static void
+dump_minipool (rtx scan)
-+ {
-+ Mnode *mp;
-+ Mnode *nmp;
++{
++ Mnode *mp;
++ Mnode *nmp;
+
-+ if (dump_file)
-+ fprintf (dump_file,
-+ ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
-+ INSN_UID (scan), (unsigned long) minipool_barrier->address, 4);
++ if (dump_file)
++ fprintf (dump_file,
++ ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
++ INSN_UID (scan), (unsigned long) minipool_barrier->address, 4);
+
-+ scan = emit_insn_after (gen_consttable_start (), scan);
-+ scan = emit_insn_after (gen_align_4 (), scan);
-+ scan = emit_label_after (minipool_vector_label, scan);
-+ new_minipool_label (minipool_vector_label);
++ scan = emit_insn_after (gen_consttable_start (), scan);
++ scan = emit_insn_after (gen_align_4 (), scan);
++ scan = emit_label_after (minipool_vector_label, scan);
++ new_minipool_label (minipool_vector_label);
+
-+ for (mp = minipool_vector_head; mp != NULL; mp = nmp)
-+ {
-+ if (mp->refcount > 0)
-+ {
-+ if (dump_file)
-+ {
-+ fprintf (dump_file,
-+ ";; Offset %u, min %ld, max %ld ",
-+ (unsigned) mp->offset, (unsigned long) mp->min_address,
-+ (unsigned long) mp->max_address);
-+ avr32_print_value (dump_file, mp->value);
-+ fputc ('\n', dump_file);
-+ }
++ for (mp = minipool_vector_head; mp != NULL; mp = nmp)
++ {
++ if (mp->refcount > 0)
++ {
++ if (dump_file)
++ {
++ fprintf (dump_file,
++ ";; Offset %u, min %ld, max %ld ",
++ (unsigned) mp->offset, (unsigned long) mp->min_address,
++ (unsigned long) mp->max_address);
++ avr32_print_value (dump_file, mp->value);
++ fputc ('\n', dump_file);
++ }
+
-+ switch (mp->fix_size)
-+ {
++ switch (mp->fix_size)
++ {
+#ifdef HAVE_consttable_4
-+ case 4:
-+ scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
-+ break;
++ case 4:
++ scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
++ break;
+
+#endif
+#ifdef HAVE_consttable_8
-+ case 8:
-+ scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
-+ break;
++ case 8:
++ scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
++ break;
+
+#endif
+#ifdef HAVE_consttable_16
+ case 0:
+ /* This can happen for force-minipool entries which just are
+ there to force the minipool to be generate. */
-+ break;
-+ default:
-+ abort ();
-+ break;
-+ }
-+ }
++ break;
++ default:
++ abort ();
++ break;
++ }
++ }
+
-+ nmp = mp->next;
-+ free (mp);
-+ }
++ nmp = mp->next;
++ free (mp);
++ }
+
-+ minipool_vector_head = minipool_vector_tail = NULL;
-+ scan = emit_insn_after (gen_consttable_end (), scan);
-+ scan = emit_barrier_after (scan);
-+ }
++ minipool_vector_head = minipool_vector_tail = NULL;
++ scan = emit_insn_after (gen_consttable_end (), scan);
++ scan = emit_barrier_after (scan);
++}
+
+/* Return the cost of forcibly inserting a barrier after INSN. */
+static int
+avr32_barrier_cost (rtx insn)
-+ {
-+ /* Basing the location of the pool on the loop depth is preferable, but at
++{
++ /* Basing the location of the pool on the loop depth is preferable, but at
+ the moment, the basic block information seems to be corrupt by this
+ stage of the compilation. */
-+ int base_cost = 50;
-+ rtx next = next_nonnote_insn (insn);
++ int base_cost = 50;
++ rtx next = next_nonnote_insn (insn);
+
-+ if (next != NULL && GET_CODE (next) == CODE_LABEL)
-+ base_cost -= 20;
++ if (next != NULL && GET_CODE (next) == CODE_LABEL)
++ base_cost -= 20;
+
-+ switch (GET_CODE (insn))
++ switch (GET_CODE (insn))
+ {
+ case CODE_LABEL:
+ /* It will always be better to place the table before the label, rather
+ default:
+ return base_cost + 10;
+ }
-+ }
++}
+
+/* Find the best place in the insn stream in the range
+ (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
+ /* This code shouldn't have been called if there was a natural barrier
+ within range. */
+ if (GET_CODE (from) == BARRIER)
-+ abort ();
++ abort ();
+
+ /* Count the length of this insn. */
+ count += get_attr_length (from);
+ /* If there is a jump table, add its length. */
+ tmp = is_jump_table (from);
+ if (tmp != NULL)
-+ {
-+ count += get_jump_table_size (tmp);
++ {
++ count += get_jump_table_size (tmp);
+
-+ /* Jump tables aren't in a basic block, so base the cost on the
++ /* Jump tables aren't in a basic block, so base the cost on the
+ dispatch insn. If we select this location, we will still put
+ the pool after the table. */
-+ new_cost = avr32_barrier_cost (from);
++ new_cost = avr32_barrier_cost (from);
+
-+ if (count < max_count && new_cost <= selected_cost)
-+ {
-+ selected = tmp;
-+ selected_cost = new_cost;
-+ selected_address = fix->address + count;
-+ }
++ if (count < max_count && new_cost <= selected_cost)
++ {
++ selected = tmp;
++ selected_cost = new_cost;
++ selected_address = fix->address + count;
++ }
+
-+ /* Continue after the dispatch table. */
-+ from = NEXT_INSN (tmp);
-+ continue;
-+ }
++ /* Continue after the dispatch table. */
++ from = NEXT_INSN (tmp);
++ continue;
++ }
+
+ new_cost = avr32_barrier_cost (from);
+
+ if (count < max_count && new_cost <= selected_cost)
-+ {
-+ selected = from;
-+ selected_cost = new_cost;
-+ selected_address = fix->address + count;
-+ }
++ {
++ selected = from;
++ selected_cost = new_cost;
++ selected_address = fix->address + count;
++ }
+
+ from = NEXT_INSN (from);
+ }
+ ADDRESS. */
+static void
+push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
-+ {
-+ Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
++{
++ Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
+
-+ fix->insn = insn;
-+ fix->address = address;
++ fix->insn = insn;
++ fix->address = address;
+
-+ fix->next = NULL;
-+ if (minipool_fix_head != NULL)
-+ minipool_fix_tail->next = fix;
-+ else
-+ minipool_fix_head = fix;
++ fix->next = NULL;
++ if (minipool_fix_head != NULL)
++ minipool_fix_tail->next = fix;
++ else
++ minipool_fix_head = fix;
+
-+ minipool_fix_tail = fix;
-+ }
++ minipool_fix_tail = fix;
++}
+
+/* Record INSN, which will need fixing up to load a value from the
+ minipool. ADDRESS is the offset of the insn since the start of the
+ MODE. */
+static void
+push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx * loc,
-+ enum machine_mode mode, rtx value)
-+ {
-+ Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
-+ rtx body = PATTERN (insn);
++ enum machine_mode mode, rtx value)
++{
++ Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
++ rtx body = PATTERN (insn);
+
-+ fix->insn = insn;
-+ fix->address = address;
-+ fix->loc = loc;
-+ fix->mode = mode;
-+ fix->fix_size = MINIPOOL_FIX_SIZE (mode, value);
-+ fix->value = value;
++ fix->insn = insn;
++ fix->address = address;
++ fix->loc = loc;
++ fix->mode = mode;
++ fix->fix_size = MINIPOOL_FIX_SIZE (mode, value);
++ fix->value = value;
+
-+ if (GET_CODE (body) == PARALLEL)
-+ {
-+ /* Mcall : Ks16 << 2 */
-+ fix->forwards = ((1 << 15) - 1) << 2;
-+ fix->backwards = (1 << 15) << 2;
-+ }
-+ else if (GET_CODE (body) == SET
-+ && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 4)
-+ {
++ if (GET_CODE (body) == PARALLEL)
++ {
++ /* Mcall : Ks16 << 2 */
++ fix->forwards = ((1 << 15) - 1) << 2;
++ fix->backwards = (1 << 15) << 2;
++ }
++ else if (GET_CODE (body) == SET
++ && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 4)
++ {
+ /* Word Load */
-+ if (TARGET_HARD_FLOAT
-+ && GET_MODE_CLASS (GET_MODE (SET_DEST (body))) == MODE_FLOAT)
-+ {
-+ /* Ldc0.w : Ku12 << 2 */
-+ fix->forwards = ((1 << 12) - 1) << 2;
-+ fix->backwards = 0;
-+ }
-+ else
-+ {
-+ if (optimize_size)
-+ {
-+ /* Lddpc : Ku7 << 2 */
-+ fix->forwards = ((1 << 7) - 1) << 2;
-+ fix->backwards = 0;
-+ }
-+ else
-+ {
-+ /* Ld.w : Ks16 */
-+ fix->forwards = ((1 << 15) - 4);
-+ fix->backwards = (1 << 15);
-+ }
-+ }
-+ }
-+ else if (GET_CODE (body) == SET
-+ && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 8)
-+ {
-+ /* Double word load */
-+ if (TARGET_HARD_FLOAT
-+ && GET_MODE_CLASS (GET_MODE (SET_DEST (body))) == MODE_FLOAT)
-+ {
-+ /* Ldc0.d : Ku12 << 2 */
-+ fix->forwards = ((1 << 12) - 1) << 2;
-+ fix->backwards = 0;
-+ }
-+ else
-+ {
-+ /* Ld.d : Ks16 */
-+ fix->forwards = ((1 << 15) - 4);
-+ fix->backwards = (1 << 15);
-+ }
-+ }
-+ else if (GET_CODE (body) == UNSPEC_VOLATILE
-+ && XINT (body, 1) == VUNSPEC_MVRC)
-+ {
-+ /* Coprocessor load */
-+ /* Ldc : Ku8 << 2 */
-+ fix->forwards = ((1 << 8) - 1) << 2;
-+ fix->backwards = 0;
-+ }
-+ else
-+ {
-+ /* Assume worst case which is lddpc insn. */
-+ fix->forwards = ((1 << 7) - 1) << 2;
-+ fix->backwards = 0;
-+ }
++ if (TARGET_HARD_FLOAT
++ && GET_MODE_CLASS (GET_MODE (SET_DEST (body))) == MODE_FLOAT)
++ {
++ /* Ldc0.w : Ku12 << 2 */
++ fix->forwards = ((1 << 12) - 1) << 2;
++ fix->backwards = 0;
++ }
++ else
++ {
++ if (optimize_size)
++ {
++ /* Lddpc : Ku7 << 2 */
++ fix->forwards = ((1 << 7) - 1) << 2;
++ fix->backwards = 0;
++ }
++ else
++ {
++ /* Ld.w : Ks16 */
++ fix->forwards = ((1 << 15) - 4);
++ fix->backwards = (1 << 15);
++ }
++ }
++ }
++ else if (GET_CODE (body) == SET
++ && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 8)
++ {
++ /* Double word load */
++ if (TARGET_HARD_FLOAT
++ && GET_MODE_CLASS (GET_MODE (SET_DEST (body))) == MODE_FLOAT)
++ {
++ /* Ldc0.d : Ku12 << 2 */
++ fix->forwards = ((1 << 12) - 1) << 2;
++ fix->backwards = 0;
++ }
++ else
++ {
++ /* Ld.d : Ks16 */
++ fix->forwards = ((1 << 15) - 4);
++ fix->backwards = (1 << 15);
++ }
++ }
++ else if (GET_CODE (body) == UNSPEC_VOLATILE
++ && XINT (body, 1) == VUNSPEC_MVRC)
++ {
++ /* Coprocessor load */
++ /* Ldc : Ku8 << 2 */
++ fix->forwards = ((1 << 8) - 1) << 2;
++ fix->backwards = 0;
++ }
++ else
++ {
++ /* Assume worst case which is lddpc insn. */
++ fix->forwards = ((1 << 7) - 1) << 2;
++ fix->backwards = 0;
++ }
+
-+ fix->minipool = NULL;
++ fix->minipool = NULL;
+
-+ /* If an insn doesn't have a range defined for it, then it isn't expecting
++ /* If an insn doesn't have a range defined for it, then it isn't expecting
+ to be reworked by this code. Better to abort now than to generate duff
+ assembly code. */
-+ if (fix->forwards == 0 && fix->backwards == 0)
-+ abort ();
++ if (fix->forwards == 0 && fix->backwards == 0)
++ abort ();
+
-+ if (dump_file)
-+ {
-+ fprintf (dump_file,
-+ ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
-+ GET_MODE_NAME (mode),
-+ INSN_UID (insn), (unsigned long) address,
-+ -1 * (long) fix->backwards, (long) fix->forwards);
-+ avr32_print_value (dump_file, fix->value);
-+ fprintf (dump_file, "\n");
-+ }
++ if (dump_file)
++ {
++ fprintf (dump_file,
++ ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
++ GET_MODE_NAME (mode),
++ INSN_UID (insn), (unsigned long) address,
++ -1 * (long) fix->backwards, (long) fix->forwards);
++ avr32_print_value (dump_file, fix->value);
++ fprintf (dump_file, "\n");
++ }
+
-+ /* Add it to the chain of fixes. */
-+ fix->next = NULL;
++ /* Add it to the chain of fixes. */
++ fix->next = NULL;
+
-+ if (minipool_fix_head != NULL)
-+ minipool_fix_tail->next = fix;
-+ else
-+ minipool_fix_head = fix;
++ if (minipool_fix_head != NULL)
++ minipool_fix_tail->next = fix;
++ else
++ minipool_fix_head = fix;
+
-+ minipool_fix_tail = fix;
-+ }
++ minipool_fix_tail = fix;
++}
+
+/* Scan INSN and note any of its operands that need fixing.
+ If DO_PUSHES is false we do not actually push any of the fixups
+ of constants that will be converted into minipool loads. */
+static bool
+note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
-+ {
-+ bool result = false;
-+ int opno;
++{
++ bool result = false;
++ int opno;
+
-+ extract_insn (insn);
++ extract_insn (insn);
+
-+ if (!constrain_operands (1))
-+ fatal_insn_not_found (insn);
++ if (!constrain_operands (1))
++ fatal_insn_not_found (insn);
+
-+ if (recog_data.n_alternatives == 0)
-+ return false;
++ if (recog_data.n_alternatives == 0)
++ return false;
+
-+ /* Fill in recog_op_alt with information about the constraints of this
++ /* Fill in recog_op_alt with information about the constraints of this
+ insn. */
-+ preprocess_constraints ();
++ preprocess_constraints ();
+
-+ for (opno = 0; opno < recog_data.n_operands; opno++)
-+ {
-+ rtx op;
++ for (opno = 0; opno < recog_data.n_operands; opno++)
++ {
++ rtx op;
+
-+ /* Things we need to fix can only occur in inputs. */
-+ if (recog_data.operand_type[opno] != OP_IN)
-+ continue;
++ /* Things we need to fix can only occur in inputs. */
++ if (recog_data.operand_type[opno] != OP_IN)
++ continue;
+
-+ op = recog_data.operand[opno];
++ op = recog_data.operand[opno];
+
-+ if (avr32_const_pool_ref_operand (op, GET_MODE (op)))
-+ {
-+ if (do_pushes)
-+ {
-+ rtx cop = avoid_constant_pool_reference (op);
++ if (avr32_const_pool_ref_operand (op, GET_MODE (op)))
++ {
++ if (do_pushes)
++ {
++ rtx cop = avoid_constant_pool_reference (op);
+
-+ /* Casting the address of something to a mode narrower than a
++ /* Casting the address of something to a mode narrower than a
+ word can cause avoid_constant_pool_reference() to return the
+ pool reference itself. That's no good to us here. Lets
+ just hope that we can use the constant pool value directly.
-+ */
-+ if (op == cop)
-+ cop = get_pool_constant (XEXP (op, 0));
++ */
++ if (op == cop)
++ cop = get_pool_constant (XEXP (op, 0));
+
-+ push_minipool_fix (insn, address,
-+ recog_data.operand_loc[opno],
-+ recog_data.operand_mode[opno], cop);
-+ }
++ push_minipool_fix (insn, address,
++ recog_data.operand_loc[opno],
++ recog_data.operand_mode[opno], cop);
++ }
+
-+ result = true;
-+ }
-+ else if (TARGET_HAS_ASM_ADDR_PSEUDOS
-+ && avr32_address_operand (op, GET_MODE (op)))
-+ {
-+ /* Handle pseudo instructions using a direct address. These pseudo
++ result = true;
++ }
++ else if (TARGET_HAS_ASM_ADDR_PSEUDOS
++ && avr32_address_operand (op, GET_MODE (op)))
++ {
++ /* Handle pseudo instructions using a direct address. These pseudo
+ instructions might need entries in the constant pool and we must
+ therefor create a constant pool for them, in case the
+ assembler/linker needs to insert entries. */
-+ if (do_pushes)
-+ {
-+ /* Push a dummy constant pool entry so that the .cpool
++ if (do_pushes)
++ {
++ /* Push a dummy constant pool entry so that the .cpool
+ directive should be inserted on the appropriate place in the
+ code even if there are no real constant pool entries. This
+ is used by the assembler and linker to know where to put
+ generated constant pool entries. */
-+ push_minipool_fix (insn, address,
-+ recog_data.operand_loc[opno],
-+ recog_data.operand_mode[opno],
-+ gen_rtx_UNSPEC (VOIDmode,
-+ gen_rtvec (1, const0_rtx),
-+ UNSPEC_FORCE_MINIPOOL));
-+ result = true;
-+ }
-+ }
-+ }
-+ return result;
-+ }
++ push_minipool_fix (insn, address,
++ recog_data.operand_loc[opno],
++ recog_data.operand_mode[opno],
++ gen_rtx_UNSPEC (VOIDmode,
++ gen_rtvec (1, const0_rtx),
++ UNSPEC_FORCE_MINIPOOL));
++ result = true;
++ }
++ }
++ }
++ return result;
++}
+
+
+static int
+avr32_insn_is_cast (rtx insn)
-+ {
++{
++
++ if (NONJUMP_INSN_P (insn)
++ && GET_CODE (PATTERN (insn)) == SET
++ && (GET_CODE (SET_SRC (PATTERN (insn))) == ZERO_EXTEND
++ || GET_CODE (SET_SRC (PATTERN (insn))) == SIGN_EXTEND)
++ && REG_P (XEXP (SET_SRC (PATTERN (insn)), 0))
++ && REG_P (SET_DEST (PATTERN (insn))))
++ return true;
++ return false;
++}
++
++/*
++ Replace all occurrences of reg FROM with reg TO in X */
++
++rtx
++avr32_replace_reg (rtx x, rtx from, rtx to)
++{
++ int i, j;
++ const char *fmt;
++
++ gcc_assert ( REG_P (from) && REG_P (to) );
++
++ /* Allow this function to make replacements in EXPR_LISTs. */
++ if (x == 0)
++ return 0;
++
++ if (rtx_equal_p (x, from))
++ return to;
++
++ if (GET_CODE (x) == SUBREG)
++ {
++ rtx new = avr32_replace_reg (SUBREG_REG (x), from, to);
++
++ if (GET_CODE (new) == CONST_INT)
++ {
++ x = simplify_subreg (GET_MODE (x), new,
++ GET_MODE (SUBREG_REG (x)),
++ SUBREG_BYTE (x));
++ gcc_assert (x);
++ }
++ else
++ SUBREG_REG (x) = new;
++
++ return x;
++ }
++ else if (GET_CODE (x) == ZERO_EXTEND)
++ {
++ rtx new = avr32_replace_reg (XEXP (x, 0), from, to);
++
++ if (GET_CODE (new) == CONST_INT)
++ {
++ x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
++ new, GET_MODE (XEXP (x, 0)));
++ gcc_assert (x);
++ }
++ else
++ XEXP (x, 0) = new;
++
++ return x;
++ }
++
++ fmt = GET_RTX_FORMAT (GET_CODE (x));
++ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
++ {
++ if (fmt[i] == 'e')
++ XEXP (x, i) = avr32_replace_reg (XEXP (x, i), from, to);
++ else if (fmt[i] == 'E')
++ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
++ XVECEXP (x, i, j) = avr32_replace_reg (XVECEXP (x, i, j), from, to);
++ }
++
++ return x;
++}
+
-+ if (NONJUMP_INSN_P (insn)
-+ && GET_CODE (PATTERN (insn)) == SET
-+ && (GET_CODE (SET_SRC (PATTERN (insn))) == ZERO_EXTEND
-+ || GET_CODE (SET_SRC (PATTERN (insn))) == SIGN_EXTEND)
-+ && REG_P (XEXP (SET_SRC (PATTERN (insn)), 0))
-+ && REG_P (SET_DEST (PATTERN (insn))))
-+ return true;
-+ return false;
-+ }
+
+/* FIXME: The level of nesting in this function is way too deep. It needs to be
+ torn apart. */
+static void
+avr32_reorg_optimization (void)
-+ {
-+ rtx first = get_insns ();
-+ rtx insn;
++{
++ rtx first = get_first_nonnote_insn ();
++ rtx insn;
+
-+ if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
-+ {
++ if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
++ {
+
-+ /* Scan through all insns looking for cast operations. */
-+ if (dump_file)
-+ {
-+ fprintf (dump_file, ";; Deleting redundant cast operations:\n");
-+ }
-+ for (insn = first; insn; insn = NEXT_INSN (insn))
-+ {
-+ rtx reg, src_reg, scan;
-+ enum machine_mode mode;
-+ int unused_cast;
-+ rtx label_ref;
-+
-+ if (avr32_insn_is_cast (insn)
-+ && (GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == QImode
-+ || GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == HImode))
-+ {
-+ mode = GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0));
-+ reg = SET_DEST (PATTERN (insn));
-+ src_reg = XEXP (SET_SRC (PATTERN (insn)), 0);
-+ }
-+ else
-+ {
-+ continue;
-+ }
++ /* Scan through all insns looking for cast operations. */
++ if (dump_file)
++ {
++ fprintf (dump_file, ";; Deleting redundant cast operations:\n");
++ }
++ for (insn = first; insn; insn = NEXT_INSN (insn))
++ {
++ rtx reg, src_reg, scan;
++ enum machine_mode mode;
++ int unused_cast;
++ rtx label_ref;
++
++ if (avr32_insn_is_cast (insn)
++ && (GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == QImode
++ || GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == HImode))
++ {
++ mode = GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0));
++ reg = SET_DEST (PATTERN (insn));
++ src_reg = XEXP (SET_SRC (PATTERN (insn)), 0);
++ }
++ else
++ {
++ continue;
++ }
+
-+ unused_cast = false;
-+ label_ref = NULL_RTX;
-+ for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
-+ {
-+ /* Check if we have reached the destination of a simple
++ unused_cast = false;
++ label_ref = NULL_RTX;
++ for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
++ {
++ /* Check if we have reached the destination of a simple
+ conditional jump which we have already scanned past. If so,
+ we can safely continue scanning. */
-+ if (LABEL_P (scan) && label_ref != NULL_RTX)
-+ {
-+ if (CODE_LABEL_NUMBER (scan) ==
-+ CODE_LABEL_NUMBER (XEXP (label_ref, 0)))
-+ label_ref = NULL_RTX;
-+ else
-+ break;
-+ }
++ if (LABEL_P (scan) && label_ref != NULL_RTX)
++ {
++ if (CODE_LABEL_NUMBER (scan) ==
++ CODE_LABEL_NUMBER (XEXP (label_ref, 0)))
++ label_ref = NULL_RTX;
++ else
++ break;
++ }
+
-+ if (!INSN_P (scan))
-+ continue;
++ if (!INSN_P (scan))
++ continue;
+
-+ /* For conditional jumps we can manage to keep on scanning if
++ /* For conditional jumps we can manage to keep on scanning if
+ we meet the destination label later on before any new jump
+ insns occure. */
-+ if (GET_CODE (scan) == JUMP_INSN)
-+ {
-+ if (any_condjump_p (scan) && label_ref == NULL_RTX)
-+ label_ref = condjump_label (scan);
-+ else
-+ break;
-+ }
++ if (GET_CODE (scan) == JUMP_INSN)
++ {
++ if (any_condjump_p (scan) && label_ref == NULL_RTX)
++ label_ref = condjump_label (scan);
++ else
++ break;
++ }
+
-+ if (!reg_mentioned_p (reg, PATTERN (scan)))
-+ continue;
++ /* Check if we have a call and the register is used as an argument. */
++ if (CALL_P (scan)
++ && find_reg_fusage (scan, USE, reg) )
++ break;
+
-+ /* Check if casted register is used in this insn */
-+ if ((regno_use_in (REGNO (reg), PATTERN (scan)) != NULL_RTX)
-+ && (GET_MODE (regno_use_in (REGNO (reg), PATTERN (scan))) ==
-+ GET_MODE (reg)))
-+ {
-+ /* If not used in the source to the set or in a memory
++ if (!reg_mentioned_p (reg, PATTERN (scan)))
++ continue;
++
++ /* Check if casted register is used in this insn */
++ if ((regno_use_in (REGNO (reg), PATTERN (scan)) != NULL_RTX)
++ && (GET_MODE (regno_use_in (REGNO (reg), PATTERN (scan))) ==
++ GET_MODE (reg)))
++ {
++ /* If not used in the source to the set or in a memory
+ expression in the destiantion then the register is used
+ as a destination and is really dead. */
-+ if (single_set (scan)
-+ && GET_CODE (PATTERN (scan)) == SET
-+ && REG_P (SET_DEST (PATTERN (scan)))
-+ && !regno_use_in (REGNO (reg), SET_SRC (PATTERN (scan)))
-+ && label_ref == NULL_RTX)
-+ {
-+ unused_cast = true;
-+ }
-+ break;
-+ }
-+
-+ /* Check if register is dead or set in this insn */
-+ if (dead_or_set_p (scan, reg))
-+ {
-+ unused_cast = true;
-+ break;
-+ }
-+ }
++ if (single_set (scan)
++ && GET_CODE (PATTERN (scan)) == SET
++ && REG_P (SET_DEST (PATTERN (scan)))
++ && !regno_use_in (REGNO (reg), SET_SRC (PATTERN (scan)))
++ && label_ref == NULL_RTX)
++ {
++ unused_cast = true;
++ }
++ break;
++ }
+
-+ /* Check if we have unresolved conditional jumps */
-+ if (label_ref != NULL_RTX)
-+ continue;
++ /* Check if register is dead or set in this insn */
++ if (dead_or_set_p (scan, reg))
++ {
++ unused_cast = true;
++ break;
++ }
++ }
+
-+ if (unused_cast)
-+ {
-+ if (REGNO (reg) == REGNO (XEXP (SET_SRC (PATTERN (insn)), 0)))
-+ {
-+ /* One operand cast, safe to delete */
-+ if (dump_file)
-+ {
-+ fprintf (dump_file,
-+ ";; INSN %i removed, casted register %i value not used.\n",
-+ INSN_UID (insn), REGNO (reg));
-+ }
-+ SET_INSN_DELETED (insn);
-+ /* Force the instruction to be recognized again */
-+ INSN_CODE (insn) = -1;
-+ }
-+ else
-+ {
-+ /* Two operand cast, which really could be substituted with
++ /* Check if we have unresolved conditional jumps */
++ if (label_ref != NULL_RTX)
++ continue;
++
++ if (unused_cast)
++ {
++ if (REGNO (reg) == REGNO (XEXP (SET_SRC (PATTERN (insn)), 0)))
++ {
++ /* One operand cast, safe to delete */
++ if (dump_file)
++ {
++ fprintf (dump_file,
++ ";; INSN %i removed, casted register %i value not used.\n",
++ INSN_UID (insn), REGNO (reg));
++ }
++ SET_INSN_DELETED (insn);
++ /* Force the instruction to be recognized again */
++ INSN_CODE (insn) = -1;
++ }
++ else
++ {
++ /* Two operand cast, which really could be substituted with
+ a move, if the source register is dead after the cast
+ insn and then the insn which sets the source register
+ could instead directly set the destination register for
+ the cast. As long as there are no insns in between which
+ uses the register. */
-+ rtx link = NULL_RTX;
-+ rtx set;
-+ rtx src_reg = XEXP (SET_SRC (PATTERN (insn)), 0);
-+ unused_cast = false;
-+
-+ if (!find_reg_note (insn, REG_DEAD, src_reg))
-+ continue;
-+
-+ /* Search for the insn which sets the source register */
-+ for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
-+ {
-+ if (REG_NOTE_KIND (link) != 0)
-+ continue;
-+ set = single_set (XEXP (link, 0));
-+ if (set && rtx_equal_p (src_reg, SET_DEST (set)))
-+ {
-+ link = XEXP (link, 0);
-+ break;
-+ }
-+ }
-+
-+ /* Found no link or link is a call insn where we can not
++ rtx link = NULL_RTX;
++ rtx set;
++ rtx src_reg = XEXP (SET_SRC (PATTERN (insn)), 0);
++ unused_cast = false;
++
++ if (!find_reg_note (insn, REG_DEAD, src_reg))
++ continue;
++
++ /* Search for the insn which sets the source register */
++ for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
++ {
++ if (REG_NOTE_KIND (link) != 0)
++ continue;
++ set = single_set (XEXP (link, 0));
++ if (set && rtx_equal_p (src_reg, SET_DEST (set)))
++ {
++ link = XEXP (link, 0);
++ break;
++ }
++ }
++
++ /* Found no link or link is a call insn where we can not
+ change the destination register */
-+ if (link == NULL_RTX || CALL_P (link))
-+ continue;
++ if (link == NULL_RTX || CALL_P (link))
++ continue;
+
-+ /* Scan through all insn between link and insn */
-+ for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
-+ {
-+ /* Don't try to trace forward past a CODE_LABEL if we
++ /* Scan through all insn between link and insn */
++ for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
++ {
++ /* Don't try to trace forward past a CODE_LABEL if we
+ haven't seen INSN yet. Ordinarily, we will only
+ find the setting insn in LOG_LINKS if it is in the
+ same basic block. However, cross-jumping can insert
+ may have two targets depending on where we came
+ from. */
+
-+ if (GET_CODE (scan) == CODE_LABEL)
-+ break;
++ if (GET_CODE (scan) == CODE_LABEL)
++ break;
+
-+ if (!INSN_P (scan))
-+ continue;
++ if (!INSN_P (scan))
++ continue;
+
-+ /* Don't try to trace forward past a JUMP. To optimize
++ /* Don't try to trace forward past a JUMP. To optimize
+ safely, we would have to check that all the
+ instructions at the jump destination did not use REG.
-+ */
++ */
+
-+ if (GET_CODE (scan) == JUMP_INSN)
-+ {
-+ break;
-+ }
++ if (GET_CODE (scan) == JUMP_INSN)
++ {
++ break;
++ }
+
-+ if (!reg_mentioned_p (src_reg, PATTERN (scan)))
-+ continue;
++ if (!reg_mentioned_p (src_reg, PATTERN (scan)))
++ continue;
+
-+ /* We have reached the cast insn */
-+ if (scan == insn)
-+ {
-+ /* We can remove cast and replace the destination
++ /* We have reached the cast insn */
++ if (scan == insn)
++ {
++ /* We can remove cast and replace the destination
+ register of the link insn with the destination
+ of the cast */
-+ if (dump_file)
-+ {
-+ fprintf (dump_file,
-+ ";; INSN %i removed, casted value unused. "
-+ "Destination of removed cast operation: register %i, folded into INSN %i.\n",
-+ INSN_UID (insn), REGNO (reg),
-+ INSN_UID (link));
-+ }
-+ /* Update link insn */
-+ SET_DEST (PATTERN (link)) =
-+ gen_rtx_REG (mode, REGNO (reg));
-+ /* Force the instruction to be recognized again */
-+ INSN_CODE (link) = -1;
-+
-+ /* Delete insn */
-+ SET_INSN_DELETED (insn);
-+ /* Force the instruction to be recognized again */
-+ INSN_CODE (insn) = -1;
-+ break;
-+ }
-+ }
-+ }
-+ }
-+ }
-+ }
++ if (dump_file)
++ {
++ fprintf (dump_file,
++ ";; INSN %i removed, casted value unused. "
++ "Destination of removed cast operation: register %i, folded into INSN %i.\n",
++ INSN_UID (insn), REGNO (reg),
++ INSN_UID (link));
++ }
++ /* Update link insn */
++ SET_DEST (PATTERN (link)) =
++ gen_rtx_REG (mode, REGNO (reg));
++ /* Force the instruction to be recognized again */
++ INSN_CODE (link) = -1;
++
++ /* Delete insn */
++ SET_INSN_DELETED (insn);
++ /* Force the instruction to be recognized again */
++ INSN_CODE (insn) = -1;
++ break;
++ }
++ }
++ }
++ }
++ }
++ }
+
-+ if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
-+ {
++ if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
++ {
+
-+ /* Scan through all insns looking for shifted add operations */
-+ if (dump_file)
-+ {
-+ fprintf (dump_file,
-+ ";; Deleting redundant shifted add operations:\n");
-+ }
-+ for (insn = first; insn; insn = NEXT_INSN (insn))
-+ {
-+ rtx reg, mem_expr, scan, op0, op1;
-+ int add_only_used_as_pointer;
-+
-+ if (INSN_P (insn)
-+ && GET_CODE (PATTERN (insn)) == SET
-+ && GET_CODE (SET_SRC (PATTERN (insn))) == PLUS
-+ && (GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == MULT
-+ || GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == ASHIFT)
-+ && GET_CODE (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1)) ==
-+ CONST_INT && REG_P (SET_DEST (PATTERN (insn)))
-+ && REG_P (XEXP (SET_SRC (PATTERN (insn)), 1))
-+ && REG_P (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0)))
-+ {
-+ reg = SET_DEST (PATTERN (insn));
-+ mem_expr = SET_SRC (PATTERN (insn));
-+ op0 = XEXP (XEXP (mem_expr, 0), 0);
-+ op1 = XEXP (mem_expr, 1);
-+ }
-+ else
-+ {
-+ continue;
-+ }
++ /* Scan through all insns looking for shifted add operations */
++ if (dump_file)
++ {
++ fprintf (dump_file,
++ ";; Deleting redundant shifted add operations:\n");
++ }
++ for (insn = first; insn; insn = NEXT_INSN (insn))
++ {
++ rtx reg, mem_expr, scan, op0, op1;
++ int add_only_used_as_pointer;
++
++ if (INSN_P (insn)
++ && GET_CODE (PATTERN (insn)) == SET
++ && GET_CODE (SET_SRC (PATTERN (insn))) == PLUS
++ && (GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == MULT
++ || GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == ASHIFT)
++ && GET_CODE (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1)) ==
++ CONST_INT && REG_P (SET_DEST (PATTERN (insn)))
++ && REG_P (XEXP (SET_SRC (PATTERN (insn)), 1))
++ && REG_P (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0)))
++ {
++ reg = SET_DEST (PATTERN (insn));
++ mem_expr = SET_SRC (PATTERN (insn));
++ op0 = XEXP (XEXP (mem_expr, 0), 0);
++ op1 = XEXP (mem_expr, 1);
++ }
++ else
++ {
++ continue;
++ }
+
-+ /* Scan forward the check if the result of the shifted add
++ /* Scan forward the check if the result of the shifted add
+ operation is only used as an address in memory operations and
+ that the operands to the shifted add are not clobbered. */
-+ add_only_used_as_pointer = false;
-+ for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
-+ {
-+ if (!INSN_P (scan))
-+ continue;
++ add_only_used_as_pointer = false;
++ for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
++ {
++ if (!INSN_P (scan))
++ continue;
+
-+ /* Don't try to trace forward past a JUMP or CALL. To optimize
++ /* Don't try to trace forward past a JUMP or CALL. To optimize
+ safely, we would have to check that all the instructions at
+ the jump destination did not use REG. */
+
-+ if (GET_CODE (scan) == JUMP_INSN)
-+ {
-+ break;
-+ }
++ if (GET_CODE (scan) == JUMP_INSN)
++ {
++ break;
++ }
+
-+ /* If used in a call insn then we cannot optimize it away */
-+ if (CALL_P (scan) && find_regno_fusage (scan, USE, REGNO (reg)))
-+ break;
++ /* If used in a call insn then we cannot optimize it away */
++ if (CALL_P (scan) && find_regno_fusage (scan, USE, REGNO (reg)))
++ break;
+
-+ /* If any of the operands of the shifted add are clobbered we
++ /* If any of the operands of the shifted add are clobbered we
+ cannot optimize the shifted adda away */
-+ if ((reg_set_p (op0, scan) && (REGNO (op0) != REGNO (reg)))
-+ || (reg_set_p (op1, scan) && (REGNO (op1) != REGNO (reg))))
-+ break;
++ if ((reg_set_p (op0, scan) && (REGNO (op0) != REGNO (reg)))
++ || (reg_set_p (op1, scan) && (REGNO (op1) != REGNO (reg))))
++ break;
+
-+ if (!reg_mentioned_p (reg, PATTERN (scan)))
-+ continue;
++ if (!reg_mentioned_p (reg, PATTERN (scan)))
++ continue;
+
-+ /* If used any other place than as a pointer or as the
++ /* If used any other place than as a pointer or as the
+ destination register we failed */
-+ if (!(single_set (scan)
++ if (!(single_set (scan)
+ && GET_CODE (PATTERN (scan)) == SET
+ && ((MEM_P (SET_DEST (PATTERN (scan)))
-+ && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
-+ && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) ==
-+ REGNO (reg)) || (MEM_P (SET_SRC (PATTERN (scan)))
-+ &&
-+ REG_P (XEXP
-+ (SET_SRC (PATTERN (scan)),
-+ 0))
-+ &&
-+ REGNO (XEXP
-+ (SET_SRC (PATTERN (scan)),
-+ 0)) == REGNO (reg))))
-+ && !(GET_CODE (PATTERN (scan)) == SET
-+ && REG_P (SET_DEST (PATTERN (scan)))
-+ && !regno_use_in (REGNO (reg),
-+ SET_SRC (PATTERN (scan)))))
-+ break;
-+
-+ /* Check if register is dead or set in this insn */
-+ if (dead_or_set_p (scan, reg))
-+ {
-+ add_only_used_as_pointer = true;
-+ break;
-+ }
-+ }
++ && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
++ && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) == REGNO (reg))
++ || (MEM_P (SET_SRC (PATTERN (scan)))
++ && REG_P (XEXP (SET_SRC (PATTERN (scan)), 0))
++ && REGNO (XEXP
++ (SET_SRC (PATTERN (scan)), 0)) == REGNO (reg))))
++ && !(GET_CODE (PATTERN (scan)) == SET
++ && REG_P (SET_DEST (PATTERN (scan)))
++ && !regno_use_in (REGNO (reg),
++ SET_SRC (PATTERN (scan)))))
++ break;
++
++ /* We cannot replace the pointer in TImode insns
++ as these have a different addressing mode than the other
++ memory insns. */
++ if ( GET_MODE (SET_DEST (PATTERN (scan))) == TImode )
++ break;
++
++ /* Check if register is dead or set in this insn */
++ if (dead_or_set_p (scan, reg))
++ {
++ add_only_used_as_pointer = true;
++ break;
++ }
++ }
+
-+ if (add_only_used_as_pointer)
-+ {
-+ /* Lets delete the add insn and replace all memory references
++ if (add_only_used_as_pointer)
++ {
++ /* Lets delete the add insn and replace all memory references
+ which uses the pointer with the full expression. */
-+ if (dump_file)
-+ {
-+ fprintf (dump_file,
-+ ";; Deleting INSN %i since address expression can be folded into all "
-+ "memory references using this expression\n",
-+ INSN_UID (insn));
-+ }
-+ SET_INSN_DELETED (insn);
-+ /* Force the instruction to be recognized again */
-+ INSN_CODE (insn) = -1;
++ if (dump_file)
++ {
++ fprintf (dump_file,
++ ";; Deleting INSN %i since address expression can be folded into all "
++ "memory references using this expression\n",
++ INSN_UID (insn));
++ }
++ SET_INSN_DELETED (insn);
++ /* Force the instruction to be recognized again */
++ INSN_CODE (insn) = -1;
+
-+ for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
-+ {
-+ if (!INSN_P (scan))
-+ continue;
++ for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
++ {
++ if (!INSN_P (scan))
++ continue;
+
-+ if (!reg_mentioned_p (reg, PATTERN (scan)))
-+ continue;
++ if (!reg_mentioned_p (reg, PATTERN (scan)))
++ continue;
+
-+ /* If used any other place than as a pointer or as the
++ /* If used any other place than as a pointer or as the
+ destination register we failed */
-+ if ((single_set (scan)
-+ && GET_CODE (PATTERN (scan)) == SET
-+ && ((MEM_P (SET_DEST (PATTERN (scan)))
-+ && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
-+ && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) ==
-+ REGNO (reg)) || (MEM_P (SET_SRC (PATTERN (scan)))
-+ &&
-+ REG_P (XEXP
-+ (SET_SRC (PATTERN (scan)),
-+ 0))
-+ &&
-+ REGNO (XEXP
-+ (SET_SRC (PATTERN (scan)),
-+ 0)) == REGNO (reg)))))
-+ {
-+ if (dump_file)
-+ {
-+ fprintf (dump_file,
-+ ";; Register %i replaced by indexed address in INSN %i\n",
-+ REGNO (reg), INSN_UID (scan));
-+ }
-+ if (MEM_P (SET_DEST (PATTERN (scan))))
-+ XEXP (SET_DEST (PATTERN (scan)), 0) = mem_expr;
-+ else
-+ XEXP (SET_SRC (PATTERN (scan)), 0) = mem_expr;
-+ }
-+
-+ /* Check if register is dead or set in this insn */
-+ if (dead_or_set_p (scan, reg))
-+ {
-+ break;
-+ }
++ if ((single_set (scan)
++ && GET_CODE (PATTERN (scan)) == SET
++ && ((MEM_P (SET_DEST (PATTERN (scan)))
++ && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
++ && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) ==
++ REGNO (reg)) || (MEM_P (SET_SRC (PATTERN (scan)))
++ &&
++ REG_P (XEXP
++ (SET_SRC (PATTERN (scan)),
++ 0))
++ &&
++ REGNO (XEXP
++ (SET_SRC (PATTERN (scan)),
++ 0)) == REGNO (reg)))))
++ {
++ if (dump_file)
++ {
++ fprintf (dump_file,
++ ";; Register %i replaced by indexed address in INSN %i\n",
++ REGNO (reg), INSN_UID (scan));
++ }
++ if (MEM_P (SET_DEST (PATTERN (scan))))
++ XEXP (SET_DEST (PATTERN (scan)), 0) = mem_expr;
++ else
++ XEXP (SET_SRC (PATTERN (scan)), 0) = mem_expr;
++ }
++
++ /* Check if register is dead or set in this insn */
++ if (dead_or_set_p (scan, reg))
++ {
++ break;
++ }
++
++ }
++ }
++ }
++ }
++
++
++ if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
++ {
++
++ /* Scan through all insns looking for conditional register to
++ register move operations */
++ if (dump_file)
++ {
++ fprintf (dump_file,
++ ";; Folding redundant conditional move operations:\n");
++ }
++ for (insn = first; insn; insn = next_nonnote_insn (insn))
++ {
++ rtx src_reg, dst_reg, scan, test;
++
++ if (INSN_P (insn)
++ && GET_CODE (PATTERN (insn)) == COND_EXEC
++ && GET_CODE (COND_EXEC_CODE (PATTERN (insn))) == SET
++ && REG_P (SET_SRC (COND_EXEC_CODE (PATTERN (insn))))
++ && REG_P (SET_DEST (COND_EXEC_CODE (PATTERN (insn))))
++ && find_reg_note (insn, REG_DEAD, SET_SRC (COND_EXEC_CODE (PATTERN (insn)))))
++ {
++ src_reg = SET_SRC (COND_EXEC_CODE (PATTERN (insn)));
++ dst_reg = SET_DEST (COND_EXEC_CODE (PATTERN (insn)));
++ test = COND_EXEC_TEST (PATTERN (insn));
++ }
++ else
++ {
++ continue;
++ }
++
++ /* Scan backward through the rest of insns in this if-then or if-else
++ block and check if we can fold the move into another of the conditional
++ insns in the same block. */
++ scan = prev_nonnote_insn (insn);
++ while (INSN_P (scan)
++ && GET_CODE (PATTERN (scan)) == COND_EXEC
++ && rtx_equal_p (COND_EXEC_TEST (PATTERN (scan)), test))
++ {
++ rtx pattern = COND_EXEC_CODE (PATTERN (scan));
++ if ( GET_CODE (pattern) == PARALLEL )
++ pattern = XVECEXP (pattern, 0, 0);
++
++ if ( reg_set_p (src_reg, pattern) )
++ {
++ /* Fold in the destination register for the cond. move
++ into this insn. */
++ SET_DEST (pattern) = dst_reg;
++ if (dump_file)
++ {
++ fprintf (dump_file,
++ ";; Deleting INSN %i since this operation can be folded into INSN %i\n",
++ INSN_UID (insn), INSN_UID (scan));
++ }
+
++ /* Scan and check if any of the insns in between uses the src_reg. We
++ must then replace it with the dst_reg. */
++ while ( (scan = next_nonnote_insn (scan)) != insn ){
++ avr32_replace_reg (scan, src_reg, dst_reg);
+ }
-+ }
-+ }
-+ }
-+ }
++ /* Delete the insn. */
++ SET_INSN_DELETED (insn);
++
++ /* Force the instruction to be recognized again */
++ INSN_CODE (insn) = -1;
++ break;
++ }
++
++ /* If the destination register is used but not set in this insn
++ we cannot fold. */
++ if ( reg_mentioned_p (dst_reg, pattern) )
++ break;
++
++ scan = prev_nonnote_insn (scan);
++ }
++ }
++ }
++
++}
+
+/* Exported to toplev.c.
+
+
+static void
+avr32_reorg (void)
-+ {
-+ rtx insn;
-+ HOST_WIDE_INT address = 0;
-+ Mfix *fix;
++{
++ rtx insn;
++ HOST_WIDE_INT address = 0;
++ Mfix *fix;
+
-+ minipool_fix_head = minipool_fix_tail = NULL;
++ minipool_fix_head = minipool_fix_tail = NULL;
+
-+ /* The first insn must always be a note, or the code below won't scan it
++ /* The first insn must always be a note, or the code below won't scan it
+ properly. */
-+ insn = get_insns ();
-+ if (GET_CODE (insn) != NOTE)
-+ abort ();
++ insn = get_insns ();
++ if (GET_CODE (insn) != NOTE)
++ abort ();
+
-+ /* Scan all the insns and record the operands that will need fixing. */
-+ for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
-+ {
-+ if (GET_CODE (insn) == BARRIER)
-+ push_minipool_barrier (insn, address);
-+ else if (INSN_P (insn))
-+ {
-+ rtx table;
++ /* Scan all the insns and record the operands that will need fixing. */
++ for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
++ {
++ if (GET_CODE (insn) == BARRIER)
++ push_minipool_barrier (insn, address);
++ else if (INSN_P (insn))
++ {
++ rtx table;
+
-+ note_invalid_constants (insn, address, true);
-+ address += get_attr_length (insn);
++ note_invalid_constants (insn, address, true);
++ address += get_attr_length (insn);
+
-+ /* If the insn is a vector jump, add the size of the table and skip
++ /* If the insn is a vector jump, add the size of the table and skip
+ the table. */
-+ if ((table = is_jump_table (insn)) != NULL)
-+ {
-+ address += get_jump_table_size (table);
-+ insn = table;
-+ }
-+ }
-+ }
-+
-+ fix = minipool_fix_head;
-+
-+ /* Now scan the fixups and perform the required changes. */
-+ while (fix)
-+ {
-+ Mfix *ftmp;
-+ Mfix *fdel;
-+ Mfix *last_added_fix;
-+ Mfix *last_barrier = NULL;
-+ Mfix *this_fix;
-+
-+ /* Skip any further barriers before the next fix. */
-+ while (fix && GET_CODE (fix->insn) == BARRIER)
-+ fix = fix->next;
-+
-+ /* No more fixes. */
-+ if (fix == NULL)
-+ break;
-+
-+ last_added_fix = NULL;
++ if ((table = is_jump_table (insn)) != NULL)
++ {
++ address += get_jump_table_size (table);
++ insn = table;
++ }
++ }
++ }
+
-+ for (ftmp = fix; ftmp; ftmp = ftmp->next)
-+ {
-+ if (GET_CODE (ftmp->insn) == BARRIER)
-+ {
-+ if (ftmp->address >= minipool_vector_head->max_address)
-+ break;
++ fix = minipool_fix_head;
+
-+ last_barrier = ftmp;
-+ }
-+ else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
-+ break;
++ /* Now scan the fixups and perform the required changes. */
++ while (fix)
++ {
++ Mfix *ftmp;
++ Mfix *fdel;
++ Mfix *last_added_fix;
++ Mfix *last_barrier = NULL;
++ Mfix *this_fix;
++
++ /* Skip any further barriers before the next fix. */
++ while (fix && GET_CODE (fix->insn) == BARRIER)
++ fix = fix->next;
++
++ /* No more fixes. */
++ if (fix == NULL)
++ break;
++
++ last_added_fix = NULL;
++
++ for (ftmp = fix; ftmp; ftmp = ftmp->next)
++ {
++ if (GET_CODE (ftmp->insn) == BARRIER)
++ {
++ if (ftmp->address >= minipool_vector_head->max_address)
++ break;
++
++ last_barrier = ftmp;
++ }
++ else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
++ break;
+
-+ last_added_fix = ftmp; /* Keep track of the last fix added.
-+ */
-+ }
++ last_added_fix = ftmp; /* Keep track of the last fix added.
++ */
++ }
+
-+ /* If we found a barrier, drop back to that; any fixes that we could
++ /* If we found a barrier, drop back to that; any fixes that we could
+ have reached but come after the barrier will now go in the next
+ mini-pool. */
-+ if (last_barrier != NULL)
-+ {
-+ /* Reduce the refcount for those fixes that won't go into this pool
++ if (last_barrier != NULL)
++ {
++ /* Reduce the refcount for those fixes that won't go into this pool
+ after all. */
-+ for (fdel = last_barrier->next;
-+ fdel && fdel != ftmp; fdel = fdel->next)
-+ {
-+ fdel->minipool->refcount--;
-+ fdel->minipool = NULL;
-+ }
++ for (fdel = last_barrier->next;
++ fdel && fdel != ftmp; fdel = fdel->next)
++ {
++ fdel->minipool->refcount--;
++ fdel->minipool = NULL;
++ }
+
-+ ftmp = last_barrier;
-+ }
-+ else
-+ {
-+ /* ftmp is first fix that we can't fit into this pool and there no
++ ftmp = last_barrier;
++ }
++ else
++ {
++ /* ftmp is first fix that we can't fit into this pool and there no
+ natural barriers that we could use. Insert a new barrier in the
+ code somewhere between the previous fix and this one, and
+ arrange to jump around it. */
-+ HOST_WIDE_INT max_address;
++ HOST_WIDE_INT max_address;
+
-+ /* The last item on the list of fixes must be a barrier, so we can
++ /* The last item on the list of fixes must be a barrier, so we can
+ never run off the end of the list of fixes without last_barrier
+ being set. */
-+ if (ftmp == NULL)
-+ abort ();
++ if (ftmp == NULL)
++ abort ();
+
-+ max_address = minipool_vector_head->max_address;
-+ /* Check that there isn't another fix that is in range that we
++ max_address = minipool_vector_head->max_address;
++ /* Check that there isn't another fix that is in range that we
+ couldn't fit into this pool because the pool was already too
+ large: we need to put the pool before such an instruction. */
-+ if (ftmp->address < max_address)
-+ max_address = ftmp->address;
++ if (ftmp->address < max_address)
++ max_address = ftmp->address;
+
-+ last_barrier = create_fix_barrier (last_added_fix, max_address);
-+ }
++ last_barrier = create_fix_barrier (last_added_fix, max_address);
++ }
+
-+ assign_minipool_offsets (last_barrier);
++ assign_minipool_offsets (last_barrier);
+
-+ while (ftmp)
-+ {
-+ if (GET_CODE (ftmp->insn) != BARRIER
-+ && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
-+ == NULL))
-+ break;
++ while (ftmp)
++ {
++ if (GET_CODE (ftmp->insn) != BARRIER
++ && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
++ == NULL))
++ break;
+
-+ ftmp = ftmp->next;
-+ }
++ ftmp = ftmp->next;
++ }
+
-+ /* Scan over the fixes we have identified for this pool, fixing them up
++ /* Scan over the fixes we have identified for this pool, fixing them up
+ and adding the constants to the pool itself. */
+ for (this_fix = fix; this_fix && ftmp != this_fix;
+ this_fix = this_fix->next)
+ if (GET_CODE (this_fix->insn) != BARRIER
+ /* Do nothing for entries present just to force the insertion of
+ a minipool. */
-+ && !IS_FORCE_MINIPOOL (this_fix->value))
-+ {
-+ rtx addr = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
-+ minipool_vector_label),
-+ this_fix->minipool->offset);
-+ *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
-+ }
++ && !IS_FORCE_MINIPOOL (this_fix->value))
++ {
++ rtx addr = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
++ minipool_vector_label),
++ this_fix->minipool->offset);
++ *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
++ }
+
-+ dump_minipool (last_barrier->insn);
-+ fix = ftmp;
-+ }
++ dump_minipool (last_barrier->insn);
++ fix = ftmp;
++ }
+
-+ /* Free the minipool memory. */
-+ obstack_free (&minipool_obstack, minipool_startobj);
++ /* Free the minipool memory. */
++ obstack_free (&minipool_obstack, minipool_startobj);
+
-+ avr32_reorg_optimization ();
-+ }
++ avr32_reorg_optimization ();
++}
+
+
+/*
+ Hook for doing some final scanning of instructions. Does nothing yet...*/
+void
+avr32_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
-+ rtx * opvec ATTRIBUTE_UNUSED,
-+ int noperands ATTRIBUTE_UNUSED)
-+ {
-+ return;
-+ }
++ rtx * opvec ATTRIBUTE_UNUSED,
++ int noperands ATTRIBUTE_UNUSED)
++{
++ return;
++}
+
+
+/* Function for changing the condition on the next instruction,
+ should be used when emmiting compare instructions and
+ the condition of the next instruction needs to change.
-+ */
++*/
+int
+set_next_insn_cond (rtx cur_insn, rtx new_cond)
-+ {
-+ rtx next_insn = next_nonnote_insn (cur_insn);
-+ if ((next_insn != NULL_RTX)
-+ && (INSN_P (next_insn))
-+ && (GET_CODE (PATTERN (next_insn)) == SET)
-+ && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE))
-+ {
-+ /* Branch instructions */
-+ XEXP (SET_SRC (PATTERN (next_insn)), 0) = new_cond;
-+ /* Force the instruction to be recognized again */
-+ INSN_CODE (next_insn) = -1;
-+ return TRUE;
-+ }
-+ else if ((next_insn != NULL_RTX)
-+ && (INSN_P (next_insn))
-+ && (GET_CODE (PATTERN (next_insn)) == SET)
-+ && comparison_operator (SET_SRC (PATTERN (next_insn)),
-+ GET_MODE (SET_SRC (PATTERN (next_insn)))))
-+ {
-+ /* scc with no compare */
-+ SET_SRC (PATTERN (next_insn)) = new_cond;
-+ /* Force the instruction to be recognized again */
-+ INSN_CODE (next_insn) = -1;
-+ return TRUE;
-+ }
++{
++ rtx next_insn = next_nonnote_insn (cur_insn);
++ if ((next_insn != NULL_RTX)
++ && (INSN_P (next_insn)))
++ {
++ if ((GET_CODE (PATTERN (next_insn)) == SET)
++ && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE))
++ {
++ /* Branch instructions */
++ XEXP (SET_SRC (PATTERN (next_insn)), 0) = new_cond;
++ /* Force the instruction to be recognized again */
++ INSN_CODE (next_insn) = -1;
++ return TRUE;
++ }
++ else if ((GET_CODE (PATTERN (next_insn)) == SET)
++ && avr32_comparison_operator (SET_SRC (PATTERN (next_insn)),
++ GET_MODE (SET_SRC (PATTERN (next_insn)))))
++ {
++ /* scc with no compare */
++ SET_SRC (PATTERN (next_insn)) = new_cond;
++ /* Force the instruction to be recognized again */
++ INSN_CODE (next_insn) = -1;
++ return TRUE;
++ }
++ else if (GET_CODE (PATTERN (next_insn)) == COND_EXEC)
++ {
++ if ( GET_CODE (new_cond) == UNSPEC )
++ {
++ COND_EXEC_TEST (PATTERN (next_insn)) =
++ gen_rtx_UNSPEC (CCmode,
++ gen_rtvec (2,
++ XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 0),
++ XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 1)),
++ XINT (new_cond, 1));
++ }
++ else
++ {
++ PUT_CODE(COND_EXEC_TEST (PATTERN (next_insn)), GET_CODE(new_cond));
++ }
++ }
++ }
+
-+ return FALSE;
-+ }
++ return FALSE;
++}
+
+/* Function for obtaining the condition for the next instruction
+ after cur_insn.
-+ */
++*/
+rtx
+get_next_insn_cond (rtx cur_insn)
-+ {
-+ rtx next_insn = next_nonnote_insn (cur_insn);
-+ rtx cond = NULL_RTX;
-+ if ((next_insn != NULL_RTX)
-+ && (INSN_P (next_insn))
-+ && (GET_CODE (PATTERN (next_insn)) == SET)
-+ && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE))
-+ {
-+ /* Branch instructions */
-+ cond = XEXP (SET_SRC (PATTERN (next_insn)), 0);
-+ }
-+ else if ((next_insn != NULL_RTX)
-+ && (INSN_P (next_insn))
-+ && (GET_CODE (PATTERN (next_insn)) == SET)
-+ && comparison_operator (SET_SRC (PATTERN (next_insn)),
-+ GET_MODE (SET_SRC (PATTERN (next_insn)))))
-+ {
-+ /* scc with no compare */
-+ cond = SET_SRC (PATTERN (next_insn));
-+ }
-+
-+ return cond;
-+ }
++{
++ rtx next_insn = next_nonnote_insn (cur_insn);
++ rtx cond = NULL_RTX;
++ if (next_insn != NULL_RTX
++ && INSN_P (next_insn))
++ {
++ if ((GET_CODE (PATTERN (next_insn)) == SET)
++ && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE))
++ {
++ /* Branch and cond if then else instructions */
++ cond = XEXP (SET_SRC (PATTERN (next_insn)), 0);
++ }
++ else if ((GET_CODE (PATTERN (next_insn)) == SET)
++ && avr32_comparison_operator (SET_SRC (PATTERN (next_insn)),
++ GET_MODE (SET_SRC (PATTERN (next_insn)))))
++ {
++ /* scc with no compare */
++ cond = SET_SRC (PATTERN (next_insn));
++ }
++ else if (GET_CODE (PATTERN (next_insn)) == COND_EXEC)
++ {
++ cond = COND_EXEC_TEST (PATTERN (next_insn));
++ }
++ }
++ return cond;
++}
+
+
+rtx
+avr32_output_cmp (rtx cond, enum machine_mode mode, rtx op0, rtx op1)
-+ {
++{
+
-+ rtx new_cond = NULL_RTX;
-+ rtx ops[2];
-+ rtx compare_pattern;
-+ ops[0] = op0;
-+ ops[1] = op1;
++ rtx new_cond = NULL_RTX;
++ rtx ops[2];
++ rtx compare_pattern;
++ ops[0] = op0;
++ ops[1] = op1;
+
++ if ( GET_CODE (op0) == AND )
++ compare_pattern = op0;
++ else
+ compare_pattern = gen_rtx_COMPARE (mode, op0, op1);
+
-+ new_cond = is_compare_redundant (compare_pattern, cond);
++ new_cond = is_compare_redundant (compare_pattern, cond);
++
++ if (new_cond != NULL_RTX)
++ return new_cond;
+
-+ if (new_cond != NULL_RTX)
-+ return new_cond;
++ /* Check if we are inserting a bit-load instead of a compare. */
++ if ( GET_CODE (op0) == AND )
++ {
++ ops[0] = XEXP (op0, 0);
++ ops[1] = XEXP (op0, 1);
++ output_asm_insn ("bld\t%0, %p1", ops);
++ return cond;
++ }
+
-+ /* Insert compare */
-+ switch (mode)
++ /* Insert compare */
++ switch (mode)
+ {
+ case QImode:
+ output_asm_insn ("cp.b\t%0, %1", ops);
+ break;
+ case DImode:
+ if (GET_CODE (op1) != REG)
-+ output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0", ops);
++ output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0", ops);
+ else
-+ output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0, %m1", ops);
++ output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0, %m1", ops);
+ break;
+ default:
+ internal_error ("Unknown comparison mode");
+ break;
+ }
+
-+ return cond;
-+ }
++ return cond;
++}
+
+int
+avr32_load_multiple_operation (rtx op,
-+ enum machine_mode mode ATTRIBUTE_UNUSED)
-+ {
-+ int count = XVECLEN (op, 0);
-+ unsigned int dest_regno;
-+ rtx src_addr;
-+ rtx elt;
-+ int i = 1, base = 0;
++ enum machine_mode mode ATTRIBUTE_UNUSED)
++{
++ int count = XVECLEN (op, 0);
++ unsigned int dest_regno;
++ rtx src_addr;
++ rtx elt;
++ int i = 1, base = 0;
+
-+ if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
-+ return 0;
++ if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
++ return 0;
++
++ /* Check to see if this might be a write-back. */
++ if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
++ {
++ i++;
++ base = 1;
++
++ /* Now check it more carefully. */
++ if (GET_CODE (SET_DEST (elt)) != REG
++ || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
++ || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
++ || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
++ return 0;
++ }
++
++ /* Perform a quick check so we don't blow up below. */
++ if (count <= 1
++ || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
++ || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
++ || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC)
++ return 0;
++
++ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
++ src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
++
++ for (; i < count; i++)
++ {
++ elt = XVECEXP (op, 0, i);
++
++ if (GET_CODE (elt) != SET
++ || GET_CODE (SET_DEST (elt)) != REG
++ || GET_MODE (SET_DEST (elt)) != SImode
++ || GET_CODE (SET_SRC (elt)) != UNSPEC)
++ return 0;
++ }
++
++ return 1;
++}
++
++int
++avr32_store_multiple_operation (rtx op,
++ enum machine_mode mode ATTRIBUTE_UNUSED)
++{
++ int count = XVECLEN (op, 0);
++ int src_regno;
++ rtx dest_addr;
++ rtx elt;
++ int i = 1;
++
++ if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
++ return 0;
++
++ /* Perform a quick check so we don't blow up below. */
++ if (count <= i
++ || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
++ || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
++ || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC)
++ return 0;
++
++ src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
++ dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
++
++ for (; i < count; i++)
++ {
++ elt = XVECEXP (op, 0, i);
++
++ if (GET_CODE (elt) != SET
++ || GET_CODE (SET_DEST (elt)) != MEM
++ || GET_MODE (SET_DEST (elt)) != SImode
++ || GET_CODE (SET_SRC (elt)) != UNSPEC)
++ return 0;
++ }
++
++ return 1;
++}
++
++int
++avr32_valid_macmac_bypass (rtx insn_out, rtx insn_in)
++{
++ /* Check if they use the same accumulator */
++ if (rtx_equal_p
++ (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
++ {
++ return TRUE;
++ }
++
++ return FALSE;
++}
++
++int
++avr32_valid_mulmac_bypass (rtx insn_out, rtx insn_in)
++{
++ /*
++ Check if the mul instruction produces the accumulator for the mac
++ instruction. */
++ if (rtx_equal_p
++ (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
++ {
++ return TRUE;
++ }
++ return FALSE;
++}
++
++int
++avr32_store_bypass (rtx insn_out, rtx insn_in)
++{
++ /* Only valid bypass if the output result is used as a src in the store
++ instruction, NOT if used as a pointer or base. */
++ if (rtx_equal_p
++ (SET_DEST (PATTERN (insn_out)), SET_SRC (PATTERN (insn_in))))
++ {
++ return TRUE;
++ }
++
++ return FALSE;
++}
++
++int
++avr32_mul_waw_bypass (rtx insn_out, rtx insn_in)
++{
++ /* Check if the register holding the result from the mul instruction is
++ used as a result register in the input instruction. */
++ if (rtx_equal_p
++ (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
++ {
++ return TRUE;
++ }
++
++ return FALSE;
++}
++
++int
++avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in)
++{
++ /* Check if the first loaded word in insn_out is used in insn_in. */
++ rtx dst_reg;
++ rtx second_loaded_reg;
++
++ /* If this is a double alu operation then the bypass is not valid */
++ if ((get_attr_type (insn_in) == TYPE_ALU
++ || get_attr_type (insn_in) == TYPE_ALU2)
++ && (GET_MODE_SIZE (GET_MODE (SET_DEST (PATTERN (insn_out)))) > 4))
++ return FALSE;
++
++ /* Get the destination register in the load */
++ if (!REG_P (SET_DEST (PATTERN (insn_out))))
++ return FALSE;
++
++ dst_reg = SET_DEST (PATTERN (insn_out));
++ second_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 1);
++
++ if (!reg_mentioned_p (second_loaded_reg, PATTERN (insn_in)))
++ return TRUE;
++
++ return FALSE;
++}
++
++
++int
++avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in)
++{
++ /*
++ Check if the first two loaded words in insn_out are used in insn_in. */
++ rtx dst_reg;
++ rtx third_loaded_reg, fourth_loaded_reg;
++
++ /* Get the destination register in the load */
++ if (!REG_P (SET_DEST (PATTERN (insn_out))))
++ return FALSE;
++
++ dst_reg = SET_DEST (PATTERN (insn_out));
++ third_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 2);
++ fourth_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 3);
++
++ if (!reg_mentioned_p (third_loaded_reg, PATTERN (insn_in))
++ && !reg_mentioned_p (fourth_loaded_reg, PATTERN (insn_in)))
++ {
++ return TRUE;
++ }
++
++ return FALSE;
++}
+
-+ /* Check to see if this might be a write-back. */
-+ if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
-+ {
-+ i++;
-+ base = 1;
-+
-+ /* Now check it more carefully. */
-+ if (GET_CODE (SET_DEST (elt)) != REG
-+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
-+ || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
-+ || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
-+ return 0;
-+ }
+
-+ /* Perform a quick check so we don't blow up below. */
-+ if (count <= 1
-+ || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
-+ || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
-+ || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC)
-+ return 0;
+
-+ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
-+ src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
++rtx
++avr32_ifcvt_modify_test (ce_if_block_t *ce_info,
++ rtx test ){
++ rtx branch_insn;
++ rtx cmp_test;
++ rtx compare_op0;
++ rtx compare_op1;
++
++
++ if ( !ce_info
++ || test == NULL_RTX
++ || !reg_mentioned_p (cc0_rtx, test))
++ return test;
++
++ branch_insn = BB_END (ce_info->test_bb);
++ cmp_test = PATTERN(prev_nonnote_insn (branch_insn));
++
++ if (GET_CODE(cmp_test) != SET
++ || !CC0_P(XEXP(cmp_test, 0)) )
++ return cmp_test;
++
++ if ( GET_CODE(SET_SRC(cmp_test)) == COMPARE ){
++ compare_op0 = XEXP(SET_SRC(cmp_test), 0);
++ compare_op1 = XEXP(SET_SRC(cmp_test), 1);
++ } else {
++ compare_op0 = SET_SRC(cmp_test);
++ compare_op1 = const0_rtx;
++ }
+
-+ for (; i < count; i++)
-+ {
-+ elt = XVECEXP (op, 0, i);
++ return gen_rtx_fmt_ee (GET_CODE(test), GET_MODE (compare_op0),
++ compare_op0, compare_op1);
++}
+
-+ if (GET_CODE (elt) != SET
-+ || GET_CODE (SET_DEST (elt)) != REG
-+ || GET_MODE (SET_DEST (elt)) != SImode
-+ || GET_CODE (SET_SRC (elt)) != UNSPEC)
-+ return 0;
-+ }
+
-+ return 1;
-+ }
+
-+int
-+avr32_store_multiple_operation (rtx op,
-+ enum machine_mode mode ATTRIBUTE_UNUSED)
-+ {
-+ int count = XVECLEN (op, 0);
-+ int src_regno;
-+ rtx dest_addr;
-+ rtx elt;
-+ int i = 1;
++rtx
++avr32_ifcvt_modify_insn (ce_if_block_t *ce_info,
++ rtx pattern,
++ rtx insn,
++ int *num_true_changes){
++ rtx test = COND_EXEC_TEST(pattern);
++ rtx op = COND_EXEC_CODE(pattern);
++ rtx cmp_insn;
++ rtx cond_exec_insn;
++ int inputs_set_outside_ifblock = 1;
++ basic_block current_bb = BLOCK_FOR_INSN (insn);
++ rtx bb_insn ;
++ enum machine_mode mode = GET_MODE (XEXP (op, 0));
++
++ if (CC0_P(XEXP(test, 0)))
++ test = avr32_ifcvt_modify_test (ce_info,
++ test );
++
++ pattern = gen_rtx_COND_EXEC (VOIDmode, test, op);
++
++ if ( !reload_completed )
++ {
++ rtx start;
++ int num_insns;
++ int max_insns = MAX_CONDITIONAL_EXECUTE;
++
++ if ( !ce_info )
++ return op;
++
++ /* Check if the insn is not suitable for conditional
++ execution. */
++ start_sequence ();
++ cond_exec_insn = emit_insn (pattern);
++ if ( recog_memoized (cond_exec_insn) < 0
++ && !no_new_pseudos )
++ {
++ /* Insn is not suitable for conditional execution, try
++ to fix it up by using an extra scratch register or
++ by pulling the operation outside the if-then-else
++ and then emitting a conditional move inside the if-then-else. */
++ end_sequence ();
++ if ( GET_CODE (op) != SET
++ || !REG_P (SET_DEST (op))
++ || GET_CODE (SET_SRC (op)) == IF_THEN_ELSE
++ || GET_MODE_SIZE (mode) > UNITS_PER_WORD )
++ return NULL_RTX;
++
++ /* Check if any of the input operands to the insn is set inside the
++ current block. */
++ if ( current_bb->index == ce_info->then_bb->index )
++ start = PREV_INSN (BB_HEAD (ce_info->then_bb));
++ else
++ start = PREV_INSN (BB_HEAD (ce_info->else_bb));
+
-+ if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
-+ return 0;
+
-+ /* Perform a quick check so we don't blow up below. */
-+ if (count <= i
-+ || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
-+ || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
-+ || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC)
-+ return 0;
++ for ( bb_insn = next_nonnote_insn (start); bb_insn != insn; bb_insn = next_nonnote_insn (bb_insn) )
++ {
++ rtx set = single_set (bb_insn);
+
-+ src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
-+ dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
++ if ( set && reg_mentioned_p (SET_DEST (set), SET_SRC (op)))
++ {
++ inputs_set_outside_ifblock = 0;
++ break;
++ }
++ }
+
-+ for (; i < count; i++)
-+ {
-+ elt = XVECEXP (op, 0, i);
++ cmp_insn = prev_nonnote_insn (BB_END (ce_info->test_bb));
+
-+ if (GET_CODE (elt) != SET
-+ || GET_CODE (SET_DEST (elt)) != MEM
-+ || GET_MODE (SET_DEST (elt)) != SImode
-+ || GET_CODE (SET_SRC (elt)) != UNSPEC)
-+ return 0;
-+ }
+
-+ return 1;
-+ }
++ /* Check if we can insert more insns. */
++ num_insns = ( ce_info->num_then_insns +
++ ce_info->num_else_insns +
++ ce_info->num_cond_clobber_insns +
++ ce_info->num_extra_move_insns );
+
-+int
-+avr32_valid_macmac_bypass (rtx insn_out, rtx insn_in)
-+ {
-+ /* Check if they use the same accumulator */
-+ if (rtx_equal_p
-+ (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
-+ {
-+ return TRUE;
-+ }
++ if ( ce_info->num_else_insns != 0 )
++ max_insns *=2;
+
-+ return FALSE;
-+ }
++ if ( num_insns >= max_insns )
++ return NULL_RTX;
+
-+int
-+avr32_valid_mulmac_bypass (rtx insn_out, rtx insn_in)
-+ {
-+ /*
-+ Check if the mul instruction produces the accumulator for the mac
-+ instruction. */
-+ if (rtx_equal_p
-+ (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
-+ {
-+ return TRUE;
-+ }
-+ return FALSE;
-+ }
++ /* Check if we have an instruction which might be converted to
++ conditional form if we give it a scratch register to clobber. */
++ {
++ rtx clobber_insn;
++ rtx scratch_reg = gen_reg_rtx (mode);
++ rtx new_pattern = copy_rtx (pattern);
++ rtx set_src = SET_SRC (COND_EXEC_CODE (new_pattern));
++
++ rtx clobber = gen_rtx_CLOBBER (mode, scratch_reg);
++ rtx vec[2] = { COND_EXEC_CODE (new_pattern), clobber };
++ COND_EXEC_CODE (new_pattern) = gen_rtx_PARALLEL (mode, gen_rtvec_v (2, vec));
++
++ start_sequence ();
++ clobber_insn = emit_insn (new_pattern);
++
++ if ( recog_memoized (clobber_insn) >= 0
++ && ( ( GET_RTX_LENGTH (GET_CODE (set_src)) == 2
++ && CONST_INT_P (XEXP (set_src, 1))
++ && avr32_const_ok_for_constraint_p (INTVAL (XEXP (set_src, 1)), 'K', "Ks08") )
++ || !ce_info->else_bb
++ || current_bb->index == ce_info->else_bb->index ))
++ {
++ end_sequence ();
++ /* Force the insn to be recognized again. */
++ INSN_CODE (insn) = -1;
+
-+int
-+avr32_store_bypass (rtx insn_out, rtx insn_in)
-+ {
-+ /* Only valid bypass if the output result is used as an src in the store
-+ instruction, NOT if used as a pointer or base. */
-+ if (rtx_equal_p
-+ (SET_DEST (PATTERN (insn_out)), SET_SRC (PATTERN (insn_in))))
-+ {
-+ return TRUE;
-+ }
++ /* If this is the first change in this IF-block then
++ signal that we have made a change. */
++ if ( ce_info->num_cond_clobber_insns == 0
++ && ce_info->num_extra_move_insns == 0 )
++ *num_true_changes += 1;
+
-+ return FALSE;
-+ }
++ ce_info->num_cond_clobber_insns++;
+
-+int
-+avr32_mul_waw_bypass (rtx insn_out, rtx insn_in)
-+ {
-+ /* Check if the register holding the result from the mul instruction is
-+ used as a result register in the input instruction. */
-+ if (rtx_equal_p
-+ (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
-+ {
-+ return TRUE;
-+ }
++ if (dump_file)
++ fprintf (dump_file,
++ "\nReplacing INSN %d with an insn using a scratch register for later ifcvt passes...\n",
++ INSN_UID (insn));
+
-+ return FALSE;
-+ }
++ return COND_EXEC_CODE (new_pattern);
++ }
++ end_sequence ();
++ }
+
-+int
-+avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in)
-+ {
-+ /* Check if the first loaded word in insn_out is used in insn_in. */
-+ rtx dst_reg;
-+ rtx second_loaded_reg;
++ if ( inputs_set_outside_ifblock )
++ {
++ /* Check if the insn before the cmp is an and which used
++ together with the cmp can be optimized into a bld. If
++ so then we should try to put the insn before the and
++ so that we can catch the bld peephole. */
++ rtx set;
++ rtx insn_before_cmp_insn = prev_nonnote_insn (cmp_insn);
++ if (insn_before_cmp_insn
++ && (set = single_set (insn_before_cmp_insn))
++ && GET_CODE (SET_SRC (set)) == AND
++ && one_bit_set_operand (XEXP (SET_SRC (set), 1), SImode)
++ /* Also make sure that the insn does not set any
++ of the input operands to the insn we are pulling out. */
++ && !reg_mentioned_p (SET_DEST (set), SET_SRC (op)) )
++ cmp_insn = prev_nonnote_insn (cmp_insn);
++
++ /* We can try to put the operation outside the if-then-else
++ blocks and insert a move. */
++ if ( !insn_invalid_p (insn)
++ /* Do not allow conditional insns to be moved outside the
++ if-then-else. */
++ && !reg_mentioned_p (cc0_rtx, insn)
++ /* We cannot move memory loads outside of the if-then-else
++ since the memory access should not be performed if the
++ condition is not met. */
++ && !mem_mentioned_p (SET_SRC (op)) )
++ {
++ rtx scratch_reg = gen_reg_rtx (mode);
++ rtx op_pattern = copy_rtx (op);
++ rtx new_insn, seq;
++ rtx link, prev_link;
++ op = copy_rtx (op);
++ /* Emit the operation to a temp reg before the compare,
++ and emit a move inside the if-then-else, hoping that the
++ whole if-then-else can be converted to conditional
++ execution. */
++ SET_DEST (op_pattern) = scratch_reg;
++ start_sequence ();
++ new_insn = emit_insn (op_pattern);
++ seq = get_insns();
++ end_sequence ();
++
++ /* Check again that the insn is valid. For some insns the insn might
++ become invalid if the destination register is changed. I.e. for mulacc
++ operations. */
++ if ( insn_invalid_p (new_insn) )
++ return NULL_RTX;
++
++ emit_insn_before_setloc (seq, cmp_insn, INSN_LOCATOR (insn));
++
++ if (dump_file)
++ fprintf (dump_file,
++ "\nMoving INSN %d out of IF-block by adding INSN %d...\n",
++ INSN_UID (insn), INSN_UID (new_insn));
++
++ ce_info->extra_move_insns[ce_info->num_extra_move_insns] = insn;
++ ce_info->moved_insns[ce_info->num_extra_move_insns] = new_insn;
++ XEXP (op, 1) = scratch_reg;
++ /* Force the insn to be recognized again. */
++ INSN_CODE (insn) = -1;
++
++ /* Move REG_DEAD notes to the moved insn. */
++ prev_link = NULL_RTX;
++ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
++ {
++ if (REG_NOTE_KIND (link) == REG_DEAD)
++ {
++ /* Add the REG_DEAD note to the new insn. */
++ rtx dead_reg = XEXP (link, 0);
++ REG_NOTES (new_insn) = gen_rtx_EXPR_LIST (REG_DEAD, dead_reg, REG_NOTES (new_insn));
++ /* Remove the REG_DEAD note from the insn we convert to a move. */
++ if ( prev_link )
++ XEXP (prev_link, 1) = XEXP (link, 1);
++ else
++ REG_NOTES (insn) = XEXP (link, 1);
++ }
++ else
++ {
++ prev_link = link;
++ }
++ }
++ /* Add a REG_DEAD note to signal that the scratch register is dead. */
++ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, scratch_reg, REG_NOTES (insn));
+
-+ /* If this is a double alu operation then the bypass is not valid */
-+ if ((get_attr_type (insn_in) == TYPE_ALU
-+ || get_attr_type (insn_in) == TYPE_ALU2)
-+ && (GET_MODE_SIZE (GET_MODE (SET_DEST (PATTERN (insn_out)))) > 4))
-+ return FALSE;
++ /* If this is the first change in this IF-block then
++ signal that we have made a change. */
++ if ( ce_info->num_cond_clobber_insns == 0
++ && ce_info->num_extra_move_insns == 0 )
++ *num_true_changes += 1;
+
-+ /* Get the destination register in the load */
-+ if (!REG_P (SET_DEST (PATTERN (insn_out))))
-+ return FALSE;
++ ce_info->num_extra_move_insns++;
++ return op;
++ }
++ }
+
-+ dst_reg = SET_DEST (PATTERN (insn_out));
-+ second_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 1);
++ /* We failed to fixup the insns, so this if-then-else can not be made
++ conditional. Just return NULL_RTX so that the if-then-else conversion
++ for this if-then-else will be cancelled. */
++ return NULL_RTX;
++ }
++ end_sequence ();
++ return op;
++ }
+
-+ if (!reg_mentioned_p (second_loaded_reg, PATTERN (insn_in)))
-+ return TRUE;
++ /* Signal that we have started if conversion after reload, which means
++ that it should be safe to split all the predicable clobber insns which
++ did not become cond_exec back into a simpler form if possible. */
++ cfun->machine->ifcvt_after_reload = 1;
+
-+ return FALSE;
-+ }
++ return pattern;
++}
+
+
-+int
-+avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in)
-+ {
-+ /*
-+ Check if the two first loaded word in insn_out are used in insn_in. */
-+ rtx dst_reg;
-+ rtx third_loaded_reg, fourth_loaded_reg;
++void
++avr32_ifcvt_modify_cancel ( ce_if_block_t *ce_info,
++ int *num_true_changes)
++{
++ int n;
+
-+ /* Get the destination register in the load */
-+ if (!REG_P (SET_DEST (PATTERN (insn_out))))
-+ return FALSE;
++ if ( ce_info->num_extra_move_insns > 0
++ && ce_info->num_cond_clobber_insns == 0)
++ /* Signal that we did not do any changes after all. */
++ *num_true_changes -= 1;
+
-+ dst_reg = SET_DEST (PATTERN (insn_out));
-+ third_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 2);
-+ fourth_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 3);
++ /* Remove any inserted move insns. */
++ for ( n = 0; n < ce_info->num_extra_move_insns; n++ )
++ {
++ rtx link, prev_link;
+
-+ if (!reg_mentioned_p (third_loaded_reg, PATTERN (insn_in))
-+ && !reg_mentioned_p (fourth_loaded_reg, PATTERN (insn_in)))
-+ {
-+ return TRUE;
-+ }
++ /* Remove REG_DEAD note since we no longer need the scratch register anyway. */
++ prev_link = NULL_RTX;
++ for (link = REG_NOTES (ce_info->extra_move_insns[n]); link; link = XEXP (link, 1))
++ {
++ if (REG_NOTE_KIND (link) == REG_DEAD)
++ {
++ if ( prev_link )
++ XEXP (prev_link, 1) = XEXP (link, 1);
++ else
++ REG_NOTES (ce_info->extra_move_insns[n]) = XEXP (link, 1);
++ }
++ else
++ {
++ prev_link = link;
++ }
++ }
+
-+ return FALSE;
-+ }
++ /* Revert all reg_notes for the moved insn. */
++ for (link = REG_NOTES (ce_info->moved_insns[n]); link; link = XEXP (link, 1))
++ {
++ REG_NOTES (ce_info->extra_move_insns[n]) = gen_rtx_EXPR_LIST (REG_NOTE_KIND (link),
++ XEXP (link, 0),
++ REG_NOTES (ce_info->extra_move_insns[n]));
++ }
+
++ /* Remove the moved insn. */
++ remove_insn ( ce_info->moved_insns[n] );
++ }
++}
+
-+//section *
-+//avr32_select_rtx_section (enum machine_mode mode ATTRIBUTE_UNUSED,
-+// rtx x ATTRIBUTE_UNUSED,
-+// unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
-+// {
-+// /* Let ASM_OUTPUT_POOL_PROLOGUE take care of this */
-+// return 0;
-+// }
++/* Function returning TRUE if INSN with OPERANDS is a splittable
++ conditional immediate clobber insn. We assume that the insn is
++ already a conditional immediate clobber insn and do not check
++ for that. */
++int
++avr32_cond_imm_clobber_splittable (rtx insn,
++ rtx operands[])
++{
++ if ( (REGNO (operands[0]) != REGNO (operands[1]))
++ && (logical_binary_operator (SET_SRC (XVECEXP (PATTERN (insn),0,0)), VOIDmode)
++ || (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == PLUS
++ && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'I', "Is16"))
++ || (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == MINUS
++ && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks16"))) )
++ return FALSE;
+
++ return TRUE;
++}
+
-+/* Function for getting an integer value from a const_int or const_double
-+ expression regardless of the HOST_WIDE_INT size. Each target cpu word
-+ will be put into the val array where the LSW will be stored at the lowest
-+ address and so forth. Assumes that const_expr is either a const_int or
++/* Function for getting an integer value from a const_int or const_double
++ expression regardless of the HOST_WIDE_INT size. Each target cpu word
++ will be put into the val array where the LSW will be stored at the lowest
++ address and so forth. Assumes that const_expr is either a const_int or
+ const_double. Only valid for modes which have sizes that are a multiple
-+ of the word size.
++ of the word size.
+*/
+void
+avr32_get_intval (enum machine_mode mode,
-+ rtx const_expr,
++ rtx const_expr,
+ HOST_WIDE_INT *val)
+{
+ int words_in_mode = GET_MODE_SIZE (mode)/UNITS_PER_WORD;
+ const int words_in_const_int = HOST_BITS_PER_WIDE_INT / BITS_PER_WORD;
-+
++
+ if ( GET_CODE(const_expr) == CONST_DOUBLE ){
+ HOST_WIDE_INT hi = CONST_DOUBLE_HIGH(const_expr);
+ HOST_WIDE_INT lo = CONST_DOUBLE_LOW(const_expr);
+ int rshift = (words_in_const_int-1) * BITS_PER_WORD;
+ val[word] = (value << lshift) >> rshift;
+ }
-+
++
+ for ( ; word < words_in_mode; word++ ){
+ /* Just put the sign bits in the remaining words. */
+ val[word] = value < 0 ? -1 : 0;
+ }
-+ }
++ }
+}
+
+void
+avr32_split_const_expr (enum machine_mode mode,
+ enum machine_mode new_mode,
-+ rtx expr,
++ rtx expr,
+ rtx *split_expr)
+{
+ int i, word;
+ int words_in_split_values = GET_MODE_SIZE (new_mode)/UNITS_PER_WORD;
+ const int words_in_const_int = HOST_BITS_PER_WIDE_INT / BITS_PER_WORD;
+ HOST_WIDE_INT *val = alloca (words_in_intval * UNITS_PER_WORD);
-+
++
+ avr32_get_intval (mode, expr, val);
-+
++
+ for ( i=0; i < (words_in_intval/words_in_split_values); i++ )
+ {
-+ HOST_WIDE_INT value_lo = 0, value_hi = 0;
++ HOST_WIDE_INT value_lo = 0, value_hi = 0;
+ for ( word = 0; word < words_in_split_values; word++ )
+ {
+ if ( word >= words_in_const_int )
-+ value_hi |= ((val[i * words_in_split_values + word] &
-+ (((HOST_WIDE_INT)1 << BITS_PER_WORD)-1))
++ value_hi |= ((val[i * words_in_split_values + word] &
++ (((HOST_WIDE_INT)1 << BITS_PER_WORD)-1))
+ << (BITS_PER_WORD * (word - words_in_const_int)));
+ else
-+ value_lo |= ((val[i * words_in_split_values + word] &
-+ (((HOST_WIDE_INT)1 << BITS_PER_WORD)-1))
++ value_lo |= ((val[i * words_in_split_values + word] &
++ (((HOST_WIDE_INT)1 << BITS_PER_WORD)-1))
+ << (BITS_PER_WORD * word));
+ }
+ split_expr[i] = immed_double_const(value_lo, value_hi, new_mode);
+}
+
+
-+
-+/* Set up library functions to comply to AVR32 ABI */
-+
-+
+/* Set up library functions to comply to AVR32 ABI */
+
+static void
+avr32_init_libfuncs (void)
+{
+ /* Convert gcc run-time function names to AVR32 ABI names */
-+
++
+ /* Double-precision floating-point arithmetic. */
-+ set_optab_libfunc (sdiv_optab, DFmode, "__avr32_f64_div");
-+ set_optab_libfunc (smul_optab, DFmode, "__avr32_f64_mul");
+ set_optab_libfunc (neg_optab, DFmode, NULL);
+
+ /* Double-precision comparisons. */
+ /* Floating point library functions which have fast versions. */
+ if ( TARGET_FAST_FLOAT )
+ {
++ set_optab_libfunc (sdiv_optab, DFmode, "__avr32_f64_div_fast");
++ set_optab_libfunc (smul_optab, DFmode, "__avr32_f64_mul_fast");
+ set_optab_libfunc (add_optab, DFmode, "__avr32_f64_add_fast");
+ set_optab_libfunc (sub_optab, DFmode, "__avr32_f64_sub_fast");
+ set_optab_libfunc (add_optab, SFmode, "__avr32_f32_add_fast");
+ }
+ else
+ {
++ set_optab_libfunc (sdiv_optab, DFmode, "__avr32_f64_div");
++ set_optab_libfunc (smul_optab, DFmode, "__avr32_f64_mul");
+ set_optab_libfunc (add_optab, DFmode, "__avr32_f64_add");
+ set_optab_libfunc (sub_optab, DFmode, "__avr32_f64_sub");
+ set_optab_libfunc (add_optab, SFmode, "__avr32_f32_add");
+ set_optab_libfunc (sdiv_optab, SFmode, "__avr32_f32_div");
+ }
+}
-+
-Index: gcc-4.2.3/gcc/config/avr32/avr32-elf.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/avr32-elf.h 2008-05-21 13:45:54.149288905 +0200
+--- /dev/null
++++ b/gcc/config/avr32/avr32-elf.h
@@ -0,0 +1,84 @@
+/*
+ Elf specific definitions.
+#define STARTFILE_SPEC "crt0%O%s crti%O%s crtbegin%O%s"
+
+#undef LINK_SPEC
-+#define LINK_SPEC "%{muse-oscall:--defsym __do_not_use_oscall_coproc__=0} %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}} %{mpart=*:-mavr32elf_%*} %{mcpu=*:-mavr32elf_%*}"
++#define LINK_SPEC "%{muse-oscall:--defsym __do_not_use_oscall_coproc__=0} %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}} %{mpart=uc3a3revd:-mavr32elf_uc3a3256s;:%{mpart=*:-mavr32elf_%*}} %{mcpu=*:-mavr32elf_%*}"
+
+
+/*
+ builtin_define ("__AVR32_FAST_FLOAT__"); \
+ } \
+ while (0)
-Index: gcc-4.2.3/gcc/config/avr32/avr32.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/avr32.h 2008-05-21 13:45:54.157287690 +0200
-@@ -0,0 +1,3281 @@
+--- /dev/null
++++ b/gcc/config/avr32/avr32.h
+@@ -0,0 +1,3347 @@
+/*
+ Definitions of target machine for AVR32.
+ Copyright 2003-2006 Atmel Corporation.
+#define IS_NAKED(t) (t & AVR32_FT_NAKED)
+#define IS_NESTED(t) (t & AVR32_FT_NESTED)
+
++#define SYMBOL_FLAG_RMW_ADDR_SHIFT SYMBOL_FLAG_MACH_DEP_SHIFT
++#define SYMBOL_REF_RMW_ADDR(RTX) \
++ ((SYMBOL_REF_FLAGS (RTX) & (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT)) != 0)
++
+
+typedef struct minipool_labels
+GTY ((chain_next ("%h.next"), chain_prev ("%h.prev")))
+ memory expression */
+ minipool_labels *minipool_label_head;
+ minipool_labels *minipool_label_tail;
++ int ifcvt_after_reload;
+} machine_function;
+
+/* Initialize data used by insn expanders. This is called from insn_emit,
+ *****************************************************************************/
+
+#ifndef ASM_SPEC
-+#define ASM_SPEC "%{fpic:--pic} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{march=*:-march=%*} %{mpart=*:-mpart=%*}"
++#define ASM_SPEC "%{fpic:--pic} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{march=ucr2nomul:-march=ucr2;:%{march=*:-march=%*}} %{mpart=uc3a3revd:-mpart=uc3a3256s;:%{mpart=*:-mpart=%*}}"
+#endif
+
+#ifndef MULTILIB_DEFAULTS
+#define TARGET_VERSION fprintf(stderr, " (AVR32, GNU assembler syntax)");
+#endif
+
++
+/* Part types. Keep this in sync with the order of avr32_part_types in avr32.c*/
+enum part_type
+{
+ PART_TYPE_AVR32_NONE,
+ PART_TYPE_AVR32_AP7000,
-+ PART_TYPE_AVR32_AP7010,
-+ PART_TYPE_AVR32_AP7020,
++ PART_TYPE_AVR32_AP7001,
++ PART_TYPE_AVR32_AP7002,
++ PART_TYPE_AVR32_AP7200,
++ PART_TYPE_AVR32_UC3A0128,
+ PART_TYPE_AVR32_UC3A0256,
+ PART_TYPE_AVR32_UC3A0512,
++ PART_TYPE_AVR32_UC3A0512ES,
+ PART_TYPE_AVR32_UC3A1128,
+ PART_TYPE_AVR32_UC3A1256,
+ PART_TYPE_AVR32_UC3A1512,
++ PART_TYPE_AVR32_UC3A1512ES,
++ PART_TYPE_AVR32_UC3A3REVD,
++ PART_TYPE_AVR32_UC3A364,
++ PART_TYPE_AVR32_UC3A364S,
++ PART_TYPE_AVR32_UC3A3128,
++ PART_TYPE_AVR32_UC3A3128S,
++ PART_TYPE_AVR32_UC3A3256,
++ PART_TYPE_AVR32_UC3A3256S,
+ PART_TYPE_AVR32_UC3B064,
+ PART_TYPE_AVR32_UC3B0128,
+ PART_TYPE_AVR32_UC3B0256,
++ PART_TYPE_AVR32_UC3B0256ES,
+ PART_TYPE_AVR32_UC3B164,
+ PART_TYPE_AVR32_UC3B1128,
-+ PART_TYPE_AVR32_UC3B1256
++ PART_TYPE_AVR32_UC3B1256,
++ PART_TYPE_AVR32_UC3B1256ES
+};
+
+/* Microarchitectures. */
+enum architecture_type
+{
+ ARCH_TYPE_AVR32_AP,
-+ ARCH_TYPE_AVR32_UC,
++ ARCH_TYPE_AVR32_UCR1,
++ ARCH_TYPE_AVR32_UCR2,
++ ARCH_TYPE_AVR32_UCR2NOMUL,
+ ARCH_TYPE_AVR32_NONE
+};
+
+#define FLAG_AVR32_HAS_RETURN_STACK (1 << 5)
+/* Flag specifying if the cpu has caches. */
+#define FLAG_AVR32_HAS_CACHES (1 << 6)
++/* Flag specifying if the cpu has support for v2 insns. */
++#define FLAG_AVR32_HAS_V2_INSNS (1 << 7)
++/* Flag specifying that the cpu has buggy mul insns. */
++#define FLAG_AVR32_HAS_NO_MUL_INSNS (1 << 8)
+
+/* Structure for holding information about different avr32 CPUs/parts */
+struct part_type_s
+#define TARGET_UNALIGNED_WORD (avr32_arch->feature_flags & FLAG_AVR32_HAS_UNALIGNED_WORD)
+#define TARGET_BRANCH_PRED (avr32_arch->feature_flags & FLAG_AVR32_HAS_BRANCH_PRED)
+#define TARGET_RETURN_STACK (avr32_arch->feature_flags & FLAG_AVR32_HAS_RETURN_STACK)
++#define TARGET_V2_INSNS (avr32_arch->feature_flags & FLAG_AVR32_HAS_V2_INSNS)
+#define TARGET_CACHES (avr32_arch->feature_flags & FLAG_AVR32_HAS_CACHES)
++#define TARGET_NO_MUL_INSNS (avr32_arch->feature_flags & FLAG_AVR32_HAS_NO_MUL_INSNS)
++#define TARGET_ARCH_AP (avr32_arch->arch_type == ARCH_TYPE_AVR32_AP)
++#define TARGET_ARCH_UCR1 (avr32_arch->arch_type == ARCH_TYPE_AVR32_UCR1)
++#define TARGET_ARCH_UCR2 (avr32_arch->arch_type == ARCH_TYPE_AVR32_UCR2)
++#define TARGET_ARCH_UC (TARGET_ARCH_UCR1 || TARGET_ARCH_UCR2)
++#define TARGET_UARCH_AVR32A (avr32_arch->uarch_type == UARCH_TYPE_AVR32A)
++#define TARGET_UARCH_AVR32B (avr32_arch->uarch_type == UARCH_TYPE_AVR32B)
+
+#define CAN_DEBUG_WITHOUT_FP
+
++
++
++
+/******************************************************************************
+ * Storage Layout
+ *****************************************************************************/
+ UNSIGNEDP = 0; \
+ (M) = SImode; \
+ } \
-+ }
++ }
+
+#define PROMOTE_FUNCTION_MODE(M, UNSIGNEDP, TYPE) \
+ { \
+ { \
+ (M) = SImode; \
+ } \
-+ }
++ }
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define CONSTRAINT_LEN(C, STR) \
+ ( ((C) == 'K' || (C) == 'I') ? 4 : \
+ ((C) == 'R') ? 5 : \
-+ ((C) == 'N' || (C) == 'O' || \
-+ (C) == 'P' || (C) == 'L' || (C) == 'J') ? -1 : \
++ ((C) == 'P') ? -1 : \
+ DEFAULT_CONSTRAINT_LEN((C), (STR)) )
+
+#define CONST_OK_FOR_CONSTRAINT_P(VALUE, C, STR) \
+ (C) == 'T' ? avr32_const_pool_ref_operand(OP, GET_MODE(OP)) : \
+ (C) == 'U' ? SYMBOL_REF_RCALL_FUNCTION_P(OP) : \
+ (C) == 'Z' ? avr32_cop_memory_operand(OP, GET_MODE(OP)) : \
++ (C) == 'Q' ? avr32_non_rmw_memory_operand(OP, GET_MODE(OP)) : \
++ (C) == 'Y' ? avr32_rmw_memory_operand(OP, GET_MODE(OP)) : \
+ 0)
+
+
+#define EXTRA_MEMORY_CONSTRAINT(C, STR) ( ((C) == 'R') || \
++ ((C) == 'Q') || \
+ ((C) == 'S') || \
++ ((C) == 'Y') || \
+ ((C) == 'Z') )
+
+
+handling like that provided by DWARF 2.
+*/
+/*
-+ Use r8
++ Use r8
+*/
+#define EH_RETURN_STACKADJ_REGNO INTERNAL_REGNUM(8)
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG(SImode, EH_RETURN_STACKADJ_REGNO)
+
+#define OPTIMIZATION_OPTIONS(LEVEL, SIZE) avr32_optimization_options (LEVEL, SIZE)
+
-+
+/******************************************************************************
+ * Addressing Modes
+ *****************************************************************************/
+ * Condition Code Status
+ *****************************************************************************/
+
-+#define HAVE_conditional_move 1
-+
+/*
+C code for a data type which is used for declaring the mdep
+component of cc_status. It defaults to int.
+ rtx value;
+ int fpflags;
+ rtx fpvalue;
++ int cond_exec_cmp_clobbered;
+} avr32_status_reg;
+
+
+*/
+
+#define CC_STATUS_MDEP_INIT \
-+ (cc_status.mdep.flags = CC_NONE , cc_status.mdep.value = 0)
++ (cc_status.mdep.flags = CC_NONE , cc_status.mdep.cond_exec_cmp_clobbered = 0, cc_status.mdep.value = 0)
+
+#define FPCC_STATUS_INIT \
+ (cc_status.mdep.fpflags = CC_NONE , cc_status.mdep.fpvalue = 0)
+punctuation characters (except for the standard one, '%') are used
+in this way.
+*/
-+/*
-+ 'm' refers to the most significant word in a two-register mode.
-+*/
-+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == 'm' || (CODE) == 'e')
++#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
++ (((CODE) == '?') \
++ || ((CODE) == '!'))
+
+/*
+A C compound statement to output to stdio stream STREAM the
+#define CASE_VECTOR_PC_RELATIVE 0
+
+/* Increase the threshold for using table jumps on the UC arch. */
-+#define CASE_VALUES_THRESHOLD ((avr32_arch->arch_type == ARCH_TYPE_AVR32_UC) ? 7 : 4)
++#define CASE_VALUES_THRESHOLD (TARGET_BRANCH_PRED ? 4 : 7)
+
+/*
+The maximum number of bytes that a single instruction can move quickly
+
+#define STORE_FLAG_VALUE 1
+
++
++/* IF-conversion macros. */
++#define IFCVT_MODIFY_INSN( CE_INFO, PATTERN, INSN ) \
++ { \
++ (PATTERN) = avr32_ifcvt_modify_insn (CE_INFO, PATTERN, INSN, &num_true_changes); \
++ }
++
++#define IFCVT_EXTRA_FIELDS \
++ int num_cond_clobber_insns; \
++ int num_extra_move_insns; \
++ rtx extra_move_insns[MAX_CONDITIONAL_EXECUTE]; \
++ rtx moved_insns[MAX_CONDITIONAL_EXECUTE];
++
++#define IFCVT_INIT_EXTRA_FIELDS( CE_INFO ) \
++ { \
++ (CE_INFO)->num_cond_clobber_insns = 0; \
++ (CE_INFO)->num_extra_move_insns = 0; \
++ }
++
++
++#define IFCVT_MODIFY_CANCEL( CE_INFO ) avr32_ifcvt_modify_cancel (CE_INFO, &num_true_changes)
++
++#define IFCVT_ALLOW_MODIFY_TEST_IN_INSN 1
++#define IFCVT_COND_EXEC_BEFORE_RELOAD (TARGET_COND_EXEC_BEFORE_RELOAD)
++
+enum avr32_builtins
+{
+ AVR32_BUILTIN_MTSR,
+ AVR32_BUILTIN_MFDR,
+ AVR32_BUILTIN_CACHE,
+ AVR32_BUILTIN_SYNC,
++ AVR32_BUILTIN_SSRF,
++ AVR32_BUILTIN_CSRF,
+ AVR32_BUILTIN_TLBR,
+ AVR32_BUILTIN_TLBS,
+ AVR32_BUILTIN_TLBW,
+ AVR32_BUILTIN_SATS,
+ AVR32_BUILTIN_SATU,
+ AVR32_BUILTIN_SATRNDS,
-+ AVR32_BUILTIN_SATRNDU
++ AVR32_BUILTIN_SATRNDU,
++ AVR32_BUILTIN_MEMS,
++ AVR32_BUILTIN_MEMC,
++ AVR32_BUILTIN_MEMT
+};
+
+
+#endif
+
+#endif
-Index: gcc-4.2.3/gcc/config/avr32/avr32.md
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/avr32.md 2008-05-21 13:45:54.165287871 +0200
-@@ -0,0 +1,4606 @@
+--- /dev/null
++++ b/gcc/config/avr32/avr32.md
+@@ -0,0 +1,4893 @@
+;; AVR32 machine description file.
+;; Copyright 2003-2006 Atmel Corporation.
+;;
+ (const_string "alu"))
+
+
-+(define_attr "cc" "none,set_vncz,set_ncz,set_cz,set_z,bld,compare,cmp_cond_insn,clobber,call_set,fpcompare,from_fpcc"
++(define_attr "cc" "none,set_vncz,set_ncz,set_cz,set_z,set_z_if_not_v2,bld,compare,cmp_cond_insn,clobber,call_set,fpcompare,from_fpcc"
+ (const_string "none"))
+
+
+; NB! Keep this in sync with enum architecture_type in avr32.h
-+(define_attr "pipeline" "ap,uc"
++(define_attr "pipeline" "ap,ucr1,ucr2,ucr2nomul"
+ (const (symbol_ref "avr32_arch->arch_type")))
+
++; Insn length in bytes
+(define_attr "length" ""
+ (const_int 4))
+
++; Signal if an insn is predicable and hence can be conditionally executed.
++(define_attr "predicable" "no,yes" (const_string "no"))
+
+;; Uses of UNSPEC in this file:
+(define_constants
+ (VUNSPEC_SYNC_STORE_IF_LOCK 25)
+ (VUNSPEC_EH_RETURN 26)
+ (VUNSPEC_FRS 27)
++ (VUNSPEC_CSRF 28)
++ (VUNSPEC_SSRF 29)
+ ])
+
+(define_constants
+
+;; For mov/addcc insns
+(define_mode_macro ADDCC [SI HI QI])
-+(define_mode_macro MOVCC [SI HI QI])
++(define_mode_macro MOVCC [SF SI HI QI])
+(define_mode_macro CMP [DI SI HI QI])
++(define_mode_attr store_postfix [(SF ".w") (SI ".w") (HI ".h") (QI ".b")])
++(define_mode_attr load_postfix [(SF ".w") (SI ".w") (HI ".sh") (QI ".ub")])
++(define_mode_attr load_postfix_s [(SI ".w") (HI ".sh") (QI ".sb")])
++(define_mode_attr load_postfix_u [(SI ".w") (HI ".uh") (QI ".ub")])
++(define_mode_attr pred_mem_constraint [(SF "RKu11") (SI "RKu11") (HI "RKu10") (QI "RKu09")])
+(define_mode_attr cmp_constraint [(DI "rKu20") (SI "rKs21") (HI "r") (QI "r")])
+(define_mode_attr cmp_predicate [(DI "register_immediate_operand")
-+ (SI "register_immediate_operand")
++ (SI "register_const_int_operand")
+ (HI "register_operand")
+ (QI "register_operand")])
++(define_mode_attr cmp_length [(DI "6")
++ (SI "4")
++ (HI "4")
++ (QI "4")])
+
+;; For all conditional insns
+(define_code_macro any_cond [eq ne gt ge lt le gtu geu ltu leu])
+(define_code_macro logical [and ior xor])
+(define_code_attr logical_insn [(and "and") (ior "or") (xor "eor")])
+
++;; Predicable operations with three register operands
++(define_code_macro predicable_op3 [and ior xor plus minus])
++(define_code_attr predicable_insn3 [(and "and") (ior "or") (xor "eor") (plus "add") (minus "sub")])
++(define_code_attr predicable_commutative3 [(and "%") (ior "%") (xor "%") (plus "%") (minus "")])
++
+;; Load the predicates
+(include "predicates.md")
+
+;; End of Automaton pipeline description for avr32
+;;******************************************************************************
+
++(define_cond_exec
++ [(match_operator 0 "avr32_comparison_operator"
++ [(match_operand:CMP 1 "register_operand" "r")
++ (match_operand:CMP 2 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")])]
++ "TARGET_V2_INSNS"
++ "%!"
++)
+
++(define_cond_exec
++ [(match_operator 0 "avr32_comparison_operator"
++ [(and:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "one_bit_set_operand" "i"))
++ (const_int 0)])]
++ "TARGET_V2_INSNS"
++ "%!"
++ )
+
+;;=============================================================================
+;; move
+;;-----------------------------------------------------------------------------
+
++
+;;== char - 8 bits ============================================================
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+(define_insn "*movqi_internal"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m,r")
+ (match_operand:QI 1 "general_operand" "rKs08,m,r,i"))]
-+ ""
++ "register_operand (operands[0], QImode)
++ || register_operand (operands[1], QImode)"
+ "@
+ mov\t%0, %1
+ ld.ub\t%0, %1
+
+ })
+
++
+(define_insn "*movhi_internal"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r")
+ (match_operand:HI 1 "general_operand" "rKs08,m,r,i"))]
-+ ""
++ "register_operand (operands[0], HImode)
++ || register_operand (operands[1], HImode)"
+ "@
+ mov\t%0, %1
+ ld.sh\t%0, %1
+ }
+)
+
++
+(define_expand "mov<mode>"
-+ [(set (match_operand:MOVM 0 "register_operand" "")
-+ (match_operand:MOVM 1 "general_operand" ""))]
++ [(set (match_operand:MOVM 0 "avr32_non_rmw_nonimmediate_operand" "")
++ (match_operand:MOVM 1 "avr32_non_rmw_general_operand" ""))]
+ ""
+ {
+
+ not check if the immediate is legitimate. Don't know if
+ this is a bug? */
+ if ( reload_in_progress
-+ && avr32_arch->arch_type != ARCH_TYPE_AVR32_UC
++ && avr32_imm_in_const_pool
+ && GET_CODE(operands[1]) == CONST_INT
+ && !avr32_const_ok_for_constraint_p(INTVAL(operands[1]), 'K', "Ks21") ){
+ operands[1] = force_const_mem(SImode, operands[1]);
+ }
+
++ /* Check for RMW memory operands. They are not allowed for mov operations
++ only the atomic memc/s/t operations */
++ if ( !reload_in_progress
++ && avr32_rmw_memory_operand (operands[0], <MODE>mode) ){
++ operands[0] = copy_rtx (operands[0]);
++ XEXP(operands[0], 0) = force_reg (<MODE>mode, XEXP(operands[0], 0));
++ }
++
++ if ( !reload_in_progress
++ && avr32_rmw_memory_operand (operands[1], <MODE>mode) ){
++ operands[1] = copy_rtx (operands[1]);
++ XEXP(operands[1], 0) = force_reg (<MODE>mode, XEXP(operands[1], 0));
++ }
++
+ if ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
+ && !avr32_legitimate_pic_operand_p(operands[1]) )
+ operands[1] = legitimize_pic_address (operands[1], <MODE>mode,
+ })
+
+
++
+(define_insn "mov<mode>_internal"
-+ [(set (match_operand:MOVM 0 "nonimmediate_operand" "=r, r, r,r,m,r")
-+ (match_operand:MOVM 1 "general_operand" "rKs08,Ks21,n,m,r,W"))]
-+ ""
++ [(set (match_operand:MOVM 0 "avr32_non_rmw_nonimmediate_operand" "=r, r, r,r,r,Q,r")
++ (match_operand:MOVM 1 "avr32_non_rmw_general_operand" "rKs08,Ks21,J,n,Q,r,W"))]
++ "(register_operand (operands[0], <MODE>mode)
++ || register_operand (operands[1], <MODE>mode))
++ && !avr32_rmw_memory_operand (operands[0], <MODE>mode)
++ && !avr32_rmw_memory_operand (operands[1], <MODE>mode)"
+ {
+ switch (which_alternative) {
+ case 0:
+ case 1: return "mov\t%0, %1";
-+ case 2: return "mov\t%0, lo(%1)\;orh\t%0,hi(%1)";
-+ case 3:
++ case 2:
++ if ( TARGET_V2_INSNS )
++ return "movh\t%0, hi(%1)";
++ /* Fallthrough */
++ case 3: return "mov\t%0, lo(%1)\;orh\t%0,hi(%1)";
++ case 4:
+ if ( (REG_P(XEXP(operands[1], 0))
+ && REGNO(XEXP(operands[1], 0)) == SP_REGNUM)
+ || (GET_CODE(XEXP(operands[1], 0)) == PLUS
+ return "lddpc\t%0, %1";
+ else
+ return "ld.w\t%0, %1";
-+ case 4:
++ case 5:
+ if ( (REG_P(XEXP(operands[0], 0))
+ && REGNO(XEXP(operands[0], 0)) == SP_REGNUM)
+ || (GET_CODE(XEXP(operands[0], 0)) == PLUS
+ return "stdsp\t%0, %1";
+ else
+ return "st.w\t%0, %1";
-+ case 5:
++ case 6:
+ if ( TARGET_HAS_ASM_ADDR_PSEUDOS )
+ return "lda.w\t%0, %1";
+ else
+ }
+ }
+
-+ [(set_attr "length" "2,4,8,4,4,8")
-+ (set_attr "type" "alu,alu,alu2,load,store,load")
-+ (set_attr "cc" "none,none,set_z,none,none,clobber")])
++ [(set_attr "length" "2,4,4,8,4,4,8")
++ (set_attr "type" "alu,alu,alu,alu2,load,store,load")
++ (set_attr "cc" "none,none,set_z_if_not_v2,set_z,none,none,clobber")])
++
++
++(define_expand "reload_out_rmw_memory_operand"
++ [(set (match_operand:SI 2 "register_operand" "=r")
++ (match_operand:SI 0 "address_operand" ""))
++ (set (mem:SI (match_dup 2))
++ (match_operand:SI 1 "register_operand" ""))]
++ ""
++ {
++ operands[0] = XEXP(operands[0], 0);
++ }
++)
++
++(define_expand "reload_in_rmw_memory_operand"
++ [(set (match_operand:SI 2 "register_operand" "=r")
++ (match_operand:SI 1 "address_operand" ""))
++ (set (match_operand:SI 0 "register_operand" "")
++ (mem:SI (match_dup 2)))]
++ ""
++ {
++ operands[1] = XEXP(operands[1], 0);
++ }
++)
+
+
+;; These instructions are for loading constants which cannot be loaded
+(define_insn_and_split "*movdi_internal"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r, r, r,r,r,m")
+ (match_operand:DI 1 "general_operand" "r, Ks08,Ks21,G,n,m,r"))]
-+ ""
++ "register_operand (operands[0], DImode)
++ || register_operand (operands[1], DImode)"
+ {
+ switch (which_alternative ){
+ case 0:
+(define_expand "movti"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "")
+ (match_operand:TI 1 "nonimmediate_operand" ""))]
-+ ""
-+ {
-+
++ "TARGET_ARCH_AP"
++ {
++
+ /* One of the ops has to be in a register. */
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (TImode, operands[1]);
+
+
+(define_insn_and_split "*movti_internal"
-+ [(set (match_operand:TI 0 "avr32_movti_dst_operand" "=r,r,<RKu00,r,r")
-+ (match_operand:TI 1 "avr32_movti_src_operand" " r,RKu00>,r,n,m"))]
-+ ""
++ [(set (match_operand:TI 0 "avr32_movti_dst_operand" "=r,&r, r, <RKu00,r,r")
++ (match_operand:TI 1 "avr32_movti_src_operand" " r,RKu00>,RKu00,r, n,T"))]
++ "(register_operand (operands[0], TImode)
++ || register_operand (operands[1], TImode))"
+ {
+ switch (which_alternative ){
+ case 0:
-+ case 3:
++ case 2:
++ case 4:
+ return "#";
+ case 1:
+ return "ldm\t%p1, %0";
-+ case 2:
++ case 3:
+ return "stm\t%p0, %1";
-+ case 4:
++ case 5:
+ return "ld.d\t%U0, pc[%1 - .]\;ld.d\t%B0, pc[%1 - . + 8]";
+ }
+ }
+ "reload_completed &&
+ (REG_P (operands[0]) &&
+ (REG_P (operands[1])
++ /* If this is a load from the constant pool we split it into
++ two double loads. */
+ || (GET_CODE (operands[1]) == MEM
+ && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
++ /* If this is a load where the pointer register is a part
++ of the register list, we must split it into two double
++ loads in order for it to be exception safe. */
++ || (GET_CODE (operands[1]) == MEM
++ && register_operand (XEXP (operands[1], 0), SImode)
++ && reg_overlap_mentioned_p (operands[0], XEXP (operands[1], 0)))
+ || GET_CODE (operands[1]) == CONST_INT
+ || GET_CODE (operands[1]) == CONST_DOUBLE))"
+ [(set (match_dup 0) (match_dup 1))
+ operands[3] = force_const_mem (DImode, split_const[1]);
+ operands[1] = force_const_mem (DImode, split_const[0]);
+ } else {
-+ internal_error("Illegal operand[1] for movdi split!");
-+ }
++ rtx ptr_reg = XEXP (operands[1], 0);
++ operands[1] = gen_rtx_MEM (DImode,
++ gen_rtx_PLUS ( SImode,
++ ptr_reg,
++ GEN_INT (8) ));
++ operands[3] = gen_rtx_MEM (DImode,
++ ptr_reg);
++
++ /* Check if the first load will clobber the pointer.
++ If so, we must switch the order of the operations. */
++ if ( reg_overlap_mentioned_p (operands[0], ptr_reg) )
++ {
++ /* We need to switch the order of the operations
++ so that the pointer register does not get clobbered
++ after the first double word load. */
++ rtx tmp;
++ tmp = operands[0];
++ operands[0] = operands[2];
++ operands[2] = tmp;
++ tmp = operands[1];
++ operands[1] = operands[3];
++ operands[3] = tmp;
++ }
++
++
++ }
+ }
-+ [(set_attr "length" "*,4,4,*,8")
-+ (set_attr "type" "*,load4,store4,*,load4")])
++ [(set_attr "length" "*,*,4,4,*,8")
++ (set_attr "type" "*,*,load4,store4,*,load4")])
+
+
+;;== float - 32 bits ==========================================================
+(define_insn "*movsf_internal"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,r,r,m")
+ (match_operand:SF 1 "general_operand" "r, G,F,m,r"))]
-+ "TARGET_SOFT_FLOAT"
++ "(register_operand (operands[0], SFmode)
++ || register_operand (operands[1], SFmode))"
+ {
+ switch (which_alternative) {
+ case 0:
+ case 1: return "mov\t%0, %1";
-+ case 2: return "mov\t%0, lo(%1)\;orh\t%0, hi(%1)";
++ case 2:
++ {
++ HOST_WIDE_INT target_float[2];
++ real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (operands[1]), SFmode);
++ if ( TARGET_V2_INSNS
++ && avr32_hi16_immediate_operand (GEN_INT (target_float[0]), VOIDmode) )
++ return "movh\t%0, hi(%1)";
++ else
++ return "mov\t%0, lo(%1)\;orh\t%0, hi(%1)";
++ }
+ case 3:
+ if ( (REG_P(XEXP(operands[1], 0))
+ && REGNO(XEXP(operands[1], 0)) == SP_REGNUM)
+(define_insn_and_split "*movdf_internal"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,r,r,m")
+ (match_operand:DF 1 "general_operand" " r,G,F,m,r"))]
-+ "TARGET_SOFT_FLOAT"
++ "TARGET_SOFT_FLOAT
++ && (register_operand (operands[0], DFmode)
++ || register_operand (operands[1], DFmode))"
+ {
+ switch (which_alternative ){
+ case 0:
+ (set_attr "cc" "*,*,*,none,none")])
+
+
++;;=============================================================================
++;; Conditional Moves
++;;=============================================================================
++(define_insn "ld<mode>_predicable"
++ [(set (match_operand:MOVCC 0 "register_operand" "=r")
++ (match_operand:MOVCC 1 "avr32_non_rmw_memory_operand" "<MOVCC:pred_mem_constraint>"))]
++ "TARGET_V2_INSNS"
++ "ld<MOVCC:load_postfix>%?\t%0, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "type" "load")
++ (set_attr "predicable" "yes")]
++)
++
++
++(define_insn "st<mode>_predicable"
++ [(set (match_operand:MOVCC 0 "avr32_non_rmw_memory_operand" "=<MOVCC:pred_mem_constraint>")
++ (match_operand:MOVCC 1 "register_operand" "r"))]
++ "TARGET_V2_INSNS"
++ "st<MOVCC:store_postfix>%?\t%0, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "type" "store")
++ (set_attr "predicable" "yes")]
++)
++
++(define_insn "mov<mode>_predicable"
++ [(set (match_operand:MOVCC 0 "register_operand" "=r")
++ (match_operand:MOVCC 1 "avr32_cond_register_immediate_operand" "rKs08"))]
++ ""
++ "mov%?\t%0, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "type" "alu")
++ (set_attr "predicable" "yes")]
++)
+
+
+;;=============================================================================
+ DONE;
+ FAIL;
+ "
-+
+ )
+
+
+
++
+;;=============================================================================
+;; Bit field instructions
+;;-----------------------------------------------------------------------------
+ "add %0, %2, %1 << %3"
+ [(set_attr "length" "4")
+ (set_attr "cc" "<INTM:alu_cc_attr>")])
-+
++
+(define_insn "add<mode>3_lsl2"
+ [(set (match_operand:INTM 0 "register_operand" "=r")
+ (plus:INTM (match_operand:INTM 1 "register_operand" "r")
+ (set_attr "cc" "<INTM:alu_cc_attr>")])
+
+
-+
+(define_insn "add<mode>3_mul"
+ [(set (match_operand:INTM 0 "register_operand" "=r")
+ (plus:INTM (mult:INTM (match_operand:INTM 1 "register_operand" "r")
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
-+ (plus:DI (match_operand:DI 1 "register_operand" "%r,0")
++ (plus:DI (match_operand:DI 1 "register_operand" "%0,r")
+ (match_operand:DI 2 "register_operand" "r,r")))]
+ ""
+ "@
-+ add %0, %1, %2\;adc %m0, %m1, %m2
-+ add %0, %2\;adc %m0, %m0, %m2"
-+ [(set_attr "length" "8,6")
++ add %0, %2\;adc %m0, %m0, %m2
++ add %0, %1, %2\;adc %m0, %m1, %m2"
++ [(set_attr "length" "6,8")
+ (set_attr "type" "alu2")
+ (set_attr "cc" "set_vncz")])
+
+
++(define_insn "add<mode>_imm_predicable"
++ [(set (match_operand:INTM 0 "register_operand" "+r")
++ (plus:INTM (match_dup 0)
++ (match_operand:INTM 1 "avr32_cond_immediate_operand" "%Is08")))]
++ ""
++ "sub%?\t%0, -%1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")]
++)
+
+;;=============================================================================
+;; subtract
+ (set_attr "cc" "<INTM:alu_cc_attr>")])
+
+(define_insn "*sub<mode>3_mul"
-+ [(set (match_operand:INTM 0 "register_operand" "=r,r,r")
-+ (minus:INTM (match_operand:INTM 1 "register_operand" "r,0,r")
-+ (mult:INTM (match_operand:INTM 2 "register_operand" "r,r,0")
-+ (match_operand:SI 3 "immediate_operand" "Ku04,Ku04,Ku04" ))))]
++ [(set (match_operand:INTM 0 "register_operand" "=r")
++ (minus:INTM (match_operand:INTM 1 "register_operand" "r")
++ (mult:INTM (match_operand:INTM 2 "register_operand" "r")
++ (match_operand:SI 3 "immediate_operand" "Ku04" ))))]
+ "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
+ (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
-+ "@
-+ sub %0, %1, %2 << %p3
-+ sub %0, %0, %2 << %p3
-+ sub %0, %1, %0 << %p3"
-+ [(set_attr "length" "4,4,4")
++ "sub %0, %1, %2 << %p3"
++ [(set_attr "length" "4")
+ (set_attr "cc" "<INTM:alu_cc_attr>")])
+
+(define_insn "*sub<mode>3_lsl"
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
-+ (minus:DI (match_operand:DI 1 "register_operand" "%r,0")
++ (minus:DI (match_operand:DI 1 "register_operand" "%0,r")
+ (match_operand:DI 2 "register_operand" "r,r")))]
+ ""
+ "@
-+ sub %0, %1, %2\;sbc %m0, %m1, %m2
-+ sub %0, %2\;sbc %m0, %m0, %m2"
-+ [(set_attr "length" "8,6")
++ sub %0, %2\;sbc %m0, %m0, %m2
++ sub %0, %1, %2\;sbc %m0, %m1, %m2"
++ [(set_attr "length" "6,8")
+ (set_attr "type" "alu2")
+ (set_attr "cc" "set_vncz")])
+
+
++(define_insn "sub<mode>_imm_predicable"
++ [(set (match_operand:INTM 0 "register_operand" "+r")
++ (minus:INTM (match_dup 0)
++ (match_operand:INTM 1 "avr32_cond_immediate_operand" "Ks08")))]
++ ""
++ "sub%?\t%0, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")])
++
++(define_insn "rsub<mode>_imm_predicable"
++ [(set (match_operand:INTM 0 "register_operand" "+r")
++ (minus:INTM (match_operand:INTM 1 "avr32_cond_immediate_operand" "Ks08")
++ (match_dup 0)))]
++ ""
++ "rsub%?\t%0, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")])
+
+;;=============================================================================
+;; multiply
+ [(set (match_operand:QI 0 "register_operand" "=r,r,r")
+ (mult:QI (match_operand:QI 1 "register_operand" "%0,r,r")
+ (match_operand:QI 2 "avr32_mul_operand" "r,r,Ks08")))]
-+ ""
++ "!TARGET_NO_MUL_INSNS"
+ {
+ switch (which_alternative){
+ case 0:
+ case 2:
+ return "mul %0, %1, %2";
+ default:
-+ abort();
++ gcc_unreachable();
+ }
+ }
+ [(set_attr "type" "mulww_w,mulww_w,mulwh")
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (mult:SI (match_operand:SI 1 "register_operand" "%0,r,r")
+ (match_operand:SI 2 "avr32_mul_operand" "r,r,Ks08")))]
-+ ""
++ "!TARGET_NO_MUL_INSNS"
+ {
+ switch (which_alternative){
+ case 0:
+ case 2:
+ return "mul %0, %1, %2";
+ default:
-+ abort();
++ gcc_unreachable();
+ }
+ }
+ [(set_attr "type" "mulww_w,mulww_w,mulwh")
+ (mult:SI
+ (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
+ (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
-+ "TARGET_DSP"
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
+ "mulhh.w %0, %1:b, %2:b"
+ [(set_attr "type" "mulhh")
+ (set_attr "length" "4")
+ (set (match_operand:SI 3 "register_operand" "")
+ (ashiftrt:SI (match_dup 0)
+ (const_int 16)))]
-+ "TARGET_DSP
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP
+ && (peep2_reg_dead_p(1, operands[0]) || (REGNO(operands[0]) == REGNO(operands[3])))"
+ [(set (match_dup 4) (sign_extend:SI (match_dup 1)))
+ (set (match_dup 6)
+ (mult:SI
+ (sign_extend:SI (neg:HI (match_operand:HI 1 "register_operand" "r")))
+ (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
-+ "TARGET_DSP"
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
+ "mulnhh.w %0, %1:b, %2:b"
+ [(set_attr "type" "mulhh")
+ (set_attr "length" "4")
+ (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
+ (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
+ (match_dup 0)))]
-+ "TARGET_DSP"
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
+ "machh.w %0, %1:b, %2:b"
+ [(set_attr "type" "machh_w")
+ (set_attr "length" "4")
+ (mult:DI
+ (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
-+ ""
++ "!TARGET_NO_MUL_INSNS"
+ "muls.d %0, %1, %2"
+ [(set_attr "type" "mulww_d")
+ (set_attr "length" "4")
+ (mult:DI
+ (zero_extend:DI (match_operand:SI 1 "register_operand" "%r"))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
-+ ""
++ "!TARGET_NO_MUL_INSNS"
+ "mulu.d %0, %1, %2"
+ [(set_attr "type" "mulww_d")
+ (set_attr "length" "4")
+ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "register_operand" "r"))
+ (match_dup 0)))]
-+ ""
++ "!TARGET_NO_MUL_INSNS"
+ "mac %0, %1, %2"
+ [(set_attr "type" "macww_w")
+ (set_attr "length" "4")
+ (set_attr "cc" "none")])
+
-+(define_insn "mulaccsidi3"
++(define_insn "*mulaccsidi3"
+ [(set (match_operand:DI 0 "register_operand" "+r")
+ (plus:DI (mult:DI
+ (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "r")))
+ (match_dup 0)))]
-+ ""
++ "!TARGET_NO_MUL_INSNS"
+ "macs.d %0, %1, %2"
+ [(set_attr "type" "macww_d")
+ (set_attr "length" "4")
+ (set_attr "cc" "none")])
+
-+(define_insn "umulaccsidi3"
++(define_insn "*umulaccsidi3"
+ [(set (match_operand:DI 0 "register_operand" "+r")
+ (plus:DI (mult:DI
+ (zero_extend:DI (match_operand:SI 1 "register_operand" "%r"))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r")))
+ (match_dup 0)))]
-+ ""
++ "!TARGET_NO_MUL_INSNS"
+ "macu.d %0, %1, %2"
+ [(set_attr "type" "macww_d")
+ (set_attr "length" "4")
+ (ss_truncate:HI (ashiftrt:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
+ (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
+ (const_int 15))))]
-+ "TARGET_DSP"
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
+ "mulsathh.h\t%0, %1:b, %2:b"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none")
+ (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
+ (const_int 1073741824))
+ (const_int 15))))]
-+ "TARGET_DSP"
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
+ "mulsatrndhh.h\t%0, %1:b, %2:b"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none")
+ (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
+ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
+ (const_int 1))))]
-+ "TARGET_DSP"
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
+ "mulsathh.w\t%0, %1:b, %2:b"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none")
+ (ss_truncate:SI (ashiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
+ (const_int 15))))]
-+ "TARGET_DSP"
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
+ "mulsatwh.w\t%0, %1, %2:b"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none")
+ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
+ (const_int 1073741824))
+ (const_int 15))))]
-+ "TARGET_DSP"
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
+ "mulsatrndwh.w\t%0, %1, %2:b"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none")
+ (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
+ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
+ (const_int 1)))))]
-+ "TARGET_DSP"
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
+ "macsathh.w\t%0, %1:b, %2:b"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none")
+ (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
+ (const_int 16)))]
-+ "TARGET_DSP"
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
+ "mulwh.d\t%0, %1, %2:b"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none")
+ (ashift:DI (mult:DI (not:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")))
+ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
+ (const_int 16)))]
-+ "TARGET_DSP"
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
+ "mulnwh.d\t%0, %1, %2:b"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none")
+ (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
+ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
+ (const_int 16))))]
-+ "TARGET_DSP"
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
+ "macwh.d\t%0, %1, %2:b"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none")
+ (plus:DI (match_dup 0)
+ (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
+ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))))]
-+ "TARGET_DSP"
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
+ "machh.d\t%0, %1:b, %2:b"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none")
+ (set_attr "cc" "none")])
+
+
++
+;;=============================================================================
+;; Logical operations
+;;-----------------------------------------------------------------------------
+
++
+;; Split up simple DImode logical operations. Simply perform the logical
+;; operation on the upper and lower halves of the registers.
+(define_split
+;;=============================================================================
+
+(define_insn "andnsi"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (and:SI (match_operand:SI 1 "register_operand" "0")
-+ (not:SI (match_operand:SI 2 "register_operand" "r"))))]
++ [(set (match_operand:SI 0 "register_operand" "+r")
++ (and:SI (match_dup 0)
++ (not:SI (match_operand:SI 1 "register_operand" "r"))))]
+ ""
-+ "andn %0, %2"
++ "andn %0, %1"
+ [(set_attr "cc" "set_z")
+ (set_attr "length" "2")]
+)
+
+
-+
-+
+(define_insn "andsi3"
-+ [(set (match_operand:SI 0 "register_operand" "=r, r, r, r")
-+ (and:SI (match_operand:SI 1 "register_operand" "%0, r, 0, r")
-+ (match_operand:SI 2 "nonmemory_operand" "r, M, i, r")))]
++ [(set (match_operand:SI 0 "avr32_rmw_memory_or_register_operand" "=Y,r,r,r, r, r,r,r,r,r")
++ (and:SI (match_operand:SI 1 "avr32_rmw_memory_or_register_operand" "%0,r,0,0, 0, 0,0,0,0,r" )
++ (match_operand:SI 2 "nonmemory_operand" " N,M,N,Ku16,Ks17,J,L,r,i,r")))]
+ ""
-+ {
-+ switch (which_alternative){
-+ case 0:
-+ return "and\t%0, %2";
-+ case 1:
-+ {
-+ int i, first_set = -1;
-+ /* Search for first bit set in mask */
-+ for ( i = 31; i >= 0; --i )
-+ if ( INTVAL(operands[2]) & (1 << i) ){
-+ first_set = i;
-+ break;
-+ }
-+ operands[2] = gen_rtx_CONST_INT(SImode, first_set + 1);
-+ return "bfextu\t%0, %1, 0, %2";
-+ }
-+ case 2:
-+ if ( one_bit_cleared_operand(operands[2], VOIDmode) ){
-+ int bitpos;
-+ for ( bitpos = 0; bitpos < 32; bitpos++ )
-+ if ( !(INTVAL(operands[2]) & (1 << bitpos)) )
-+ break;
-+ operands[2] = gen_rtx_CONST_INT(SImode, bitpos);
-+ return "cbr\t%0, %2";
-+ } else if ( (INTVAL(operands[2]) >= 0) &&
-+ (INTVAL(operands[2]) <= 65535) )
-+ return "andl\t%0, %2, COH";
-+ else if ( (INTVAL(operands[2]) < 0) &&
-+ (INTVAL(operands[2]) >= -65536 ) )
-+ return "andl\t%0, lo(%2)";
-+ else if ( ((INTVAL(operands[2]) & 0xffff) == 0xffff) )
-+ return "andh\t%0, hi(%2)";
-+ else if ( ((INTVAL(operands[2]) & 0xffff) == 0x0) )
-+ return "andh\t%0, hi(%2), COH";
-+ else
-+ return "andh\t%0, hi(%2)\;andl\t%0, lo(%2)";
-+ case 3:
-+ return "and\t%0, %1, %2";
-+ default:
-+ abort();
-+ }
-+ }
-+
-+ [(set_attr "length" "2,4,8,4")
-+ (set_attr "cc" "set_z")])
-+
++ "@
++ memc\t%0, %z2
++ bfextu\t%0, %1, 0, %z2
++ cbr\t%0, %z2
++ andl\t%0, %2, COH
++ andl\t%0, lo(%2)
++ andh\t%0, hi(%2), COH
++ andh\t%0, hi(%2)
++ and\t%0, %2
++ andh\t%0, hi(%2)\;andl\t%0, lo(%2)
++ and\t%0, %1, %2"
++
++ [(set_attr "length" "4,4,2,4,4,4,4,2,8,4")
++ (set_attr "cc" "none,set_z,set_z,set_z,set_z,set_z,set_z,set_z,set_z,set_z")])
+
+(define_insn "anddi3"
+ [(set (match_operand:DI 0 "register_operand" "=&r,&r")
+;;=============================================================================
+
+(define_insn "iorsi3"
-+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
-+ (ior:SI (match_operand:SI 1 "register_operand" "%0,0,r" )
-+ (match_operand:SI 2 "nonmemory_operand" "r ,i,r")))]
++ [(set (match_operand:SI 0 "avr32_rmw_memory_or_register_operand" "=Y,r,r, r,r,r,r")
++ (ior:SI (match_operand:SI 1 "avr32_rmw_memory_or_register_operand" "%0,0,0, 0,0,0,r" )
++ (match_operand:SI 2 "nonmemory_operand" " O,O,Ku16,J,r,i,r")))]
+ ""
-+ {
-+ switch (which_alternative){
-+ case 0:
-+ return "or\t%0, %2";
-+ case 1:
-+ if ( one_bit_set_operand(operands[2], VOIDmode) ){
-+ int bitpos;
-+ for (bitpos = 0; bitpos < 32; bitpos++)
-+ if (INTVAL(operands[2]) & (1 << bitpos))
-+ break;
-+ operands[2] = gen_rtx_CONST_INT( SImode, bitpos);
-+ return "sbr\t%0, %2";
-+ } else if ( (INTVAL(operands[2]) >= 0) &&
-+ (INTVAL(operands[2]) <= 65535) )
-+ return "orl\t%0, %2";
-+ else if ( ((INTVAL(operands[2]) & 0xffff) == 0x0) )
-+ return "orh\t%0, hi(%2)";
-+ else
-+ return "orh\t%0, hi(%2)\;orl\t%0, lo(%2)";
-+ case 2:
-+ return "or\t%0, %1, %2";
-+ default:
-+ abort();
-+ }
-+ }
-+ [(set_attr "length" "2,8,4")
-+ (set_attr "cc" "set_z")])
++ "@
++ mems\t%0, %p2
++ sbr\t%0, %p2
++ orl\t%0, %2
++ orh\t%0, hi(%2)
++ or\t%0, %2
++ orh\t%0, hi(%2)\;orl\t%0, lo(%2)
++ or\t%0, %1, %2"
+
++ [(set_attr "length" "4,2,4,4,2,8,4")
++ (set_attr "cc" "none,set_z,set_z,set_z,set_z,set_z,set_z")])
+
-+;(define_insn "iorsi3"
-+; [(set (match_operand:SI 0 "register_operand" "=r, r, r")
-+; (ior:SI (match_operand:SI 1 "avr32_logical_insn_operand" "r, r, rA" )
-+; (match_operand:SI 2 "register_operand" "0, i, r")))]
-+; ""
-+; {
-+; switch (which_alternative){
-+; case 0:
-+; return "or %0, %2";
-+; case 1:
-+; if ( one_bit_set_operand(operands[2], VOIDmode) ){
-+; int i, bitpos;
-+; for ( i = 0; i < 32; i++ )
-+; if ( INTVAL(operands[2]) & (1 << i) ){
-+; bitpos = i;
-+; break;
-+; }
-+; operands[2] = gen_rtx_CONST_INT( SImode, bitpos);
-+; return "sbr %0, %2";
-+; } else if ( (INTVAL(operands[2]) >= 0) &&
-+; (INTVAL(operands[2]) <= 65535) )
-+; return "orl %0, %2";
-+; else if ( ((INTVAL(operands[2]) & 0xffff) == 0x0) )
-+; return "orh %0, hi(%2)";
-+; else
-+; return "orh %0, hi(%2)\;orl %0, lo(%2)";
-+; case 2:
-+; return "or %0, %2, %1";
-+; }
-+; }
-+; [(set_attr "length" "2,8,4")
-+; (set_attr "cc" "set_z")])
+
+(define_insn "iordi3"
+ [(set (match_operand:DI 0 "register_operand" "=&r,&r")
+;;=============================================================================
+
+(define_insn "xorsi3"
-+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
-+ (xor:SI (match_operand:SI 1 "register_operand" "0,0,r")
-+ (match_operand:SI 2 "nonmemory_operand" "r,i,r")))]
++ [(set (match_operand:SI 0 "avr32_rmw_memory_or_register_operand" "=Y,r, r,r,r,r")
++ (xor:SI (match_operand:SI 1 "avr32_rmw_memory_or_register_operand" "%0,0, 0,0,0,r" )
++ (match_operand:SI 2 "nonmemory_operand" " O,Ku16,J,r,i,r")))]
+ ""
-+ {
-+ switch (which_alternative){
-+ case 0:
-+ return "eor %0, %2";
-+ case 1:
-+ if ( (INTVAL(operands[2]) >= 0) &&
-+ (INTVAL(operands[2]) <= 65535) )
-+ return "eorl %0, %2";
-+ else if ( ((INTVAL(operands[2]) & 0xffff) == 0x0) )
-+ return "eorh %0, hi(%2)";
-+ else
-+ return "eorh %0, hi(%2)\;eorl %0, lo(%2)";
-+ case 2:
-+ return "eor %0, %1, %2";
-+ default:
-+ abort();
-+ }
-+ }
++ "@
++ memt\t%0, %p2
++ eorl\t%0, %2
++ eorh\t%0, hi(%2)
++ eor\t%0, %2
++ eorh\t%0, hi(%2)\;eorl\t%0, lo(%2)
++ eor\t%0, %1, %2"
++
++ [(set_attr "length" "4,4,4,2,8,4")
++ (set_attr "cc" "none,set_z,set_z,set_z,set_z,set_z")])
+
-+ [(set_attr "length" "2,8,4")
-+ (set_attr "cc" "set_z")])
+
+(define_insn "xordi3"
+ [(set (match_operand:DI 0 "register_operand" "=&r,&r")
+)
+
+;;=============================================================================
++;; Three operand predicable insns
++;;=============================================================================
++
++(define_insn "<predicable_insn3><mode>_predicable"
++ [(set (match_operand:INTM 0 "register_operand" "=r")
++ (predicable_op3:INTM (match_operand:INTM 1 "register_operand" "<predicable_commutative3>r")
++ (match_operand:INTM 2 "register_operand" "r")))]
++ "TARGET_V2_INSNS"
++ "<predicable_insn3>%?\t%0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")]
++)
++
++(define_insn_and_split "<predicable_insn3><mode>_imm_clobber_predicable"
++ [(parallel
++ [(set (match_operand:INTM 0 "register_operand" "=r")
++ (predicable_op3:INTM (match_operand:INTM 1 "register_operand" "<predicable_commutative3>r")
++ (match_operand:INTM 2 "avr32_mov_immediate_operand" "JKs21")))
++ (clobber (match_operand:INTM 3 "register_operand" "=&r"))])]
++ "TARGET_V2_INSNS"
++ {
++ if ( current_insn_predicate != NULL_RTX )
++ {
++ if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08") )
++ return "%! mov%?\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
++ else if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks21") )
++ return "%! mov\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
++ else
++ return "%! movh\t%3, hi(%2)\;<predicable_insn3>%?\t%0, %1, %3";
++ }
++ else
++ {
++ if ( !avr32_cond_imm_clobber_splittable (insn, operands) )
++ {
++ if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08") )
++ return "mov%?\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
++ else if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks21") )
++ return "mov\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
++ else
++ return "movh\t%3, hi(%2)\;<predicable_insn3>%?\t%0, %1, %3";
++ }
++ return "#";
++ }
++
++ }
++ ;; If we find out that we could not actually do if-conversion on the block
++ ;; containing this insn we convert it back to normal immediate format
++ ;; to avoid outputing a redundant move insn
++ ;; Do not split until after we have checked if we can make the insn
++ ;; conditional.
++ "(GET_CODE (PATTERN (insn)) != COND_EXEC
++ && cfun->machine->ifcvt_after_reload
++ && avr32_cond_imm_clobber_splittable (insn, operands))"
++ [(set (match_dup 0)
++ (predicable_op3:INTM (match_dup 1)
++ (match_dup 2)))]
++ ""
++ [(set_attr "length" "8")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")]
++ )
++
++
++;;=============================================================================
++;; Zero extend predicable insns
++;;=============================================================================
++(define_insn_and_split "zero_extendhisi_clobber_predicable"
++ [(parallel
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (zero_extend:SI (match_operand:HI 1 "register_operand" "r")))
++ (clobber (match_operand:SI 2 "register_operand" "=&r"))])]
++ "TARGET_V2_INSNS"
++ {
++ if ( current_insn_predicate != NULL_RTX )
++ {
++ return "%! mov\t%2, 0xffff\;and%?\t%0, %1, %2";
++ }
++ else
++ {
++ return "#";
++ }
++
++ }
++ ;; If we find out that we could not actually do if-conversion on the block
++ ;; containing this insn we convert it back to normal immediate format
++ ;; to avoid outputing a redundant move insn
++ ;; Do not split until after we have checked if we can make the insn
++ ;; conditional.
++ "(GET_CODE (PATTERN (insn)) != COND_EXEC
++ && cfun->machine->ifcvt_after_reload)"
++ [(set (match_dup 0)
++ (zero_extend:SI (match_dup 1)))]
++ ""
++ [(set_attr "length" "8")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")]
++ )
++
++(define_insn_and_split "zero_extendqisi_clobber_predicable"
++ [(parallel
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (zero_extend:SI (match_operand:QI 1 "register_operand" "r")))
++ (clobber (match_operand:SI 2 "register_operand" "=&r"))])]
++ "TARGET_V2_INSNS"
++ {
++ if ( current_insn_predicate != NULL_RTX )
++ {
++ return "%! mov\t%2, 0xff\;and%?\t%0, %1, %2";
++ }
++ else
++ {
++ return "#";
++ }
++
++ }
++ ;; If we find out that we could not actually do if-conversion on the block
++ ;; containing this insn we convert it back to normal immediate format
++ ;; to avoid outputing a redundant move insn
++ ;; Do not split until after we have checked if we can make the insn
++ ;; conditional.
++ "(GET_CODE (PATTERN (insn)) != COND_EXEC
++ && cfun->machine->ifcvt_after_reload)"
++ [(set (match_dup 0)
++ (zero_extend:SI (match_dup 1)))]
++ ""
++ [(set_attr "length" "8")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")]
++ )
++
++(define_insn_and_split "zero_extendqihi_clobber_predicable"
++ [(parallel
++ [(set (match_operand:HI 0 "register_operand" "=r")
++ (zero_extend:HI (match_operand:QI 1 "register_operand" "r")))
++ (clobber (match_operand:SI 2 "register_operand" "=&r"))])]
++ "TARGET_V2_INSNS"
++ {
++ if ( current_insn_predicate != NULL_RTX )
++ {
++ return "%! mov\t%2, 0xff\;and%?\t%0, %1, %2";
++ }
++ else
++ {
++ return "#";
++ }
++
++ }
++ ;; If we find out that we could not actually do if-conversion on the block
++ ;; containing this insn we convert it back to normal immediate format
++ ;; to avoid outputing a redundant move insn
++ ;; Do not split until after we have checked if we can make the insn
++ ;; conditional.
++ "(GET_CODE (PATTERN (insn)) != COND_EXEC
++ && cfun->machine->ifcvt_after_reload)"
++ [(set (match_dup 0)
++ (zero_extend:HI (match_dup 1)))]
++ ""
++ [(set_attr "length" "8")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")]
++ )
++;;=============================================================================
+;; divmod
+;;-----------------------------------------------------------------------------
+;; Signed division that produces both a quotient and a remainder.
+;; Negate operand 1 and store the result in operand 0.
+;;=============================================================================
+(define_insn "negsi2"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (neg:SI (match_operand:SI 1 "register_operand" "0")))]
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (neg:SI (match_operand:SI 1 "register_operand" "0,r")))]
+ ""
-+ "neg %0"
-+ [(set_attr "length" "2")
++ "@
++ neg\t%0
++ rsub\t%0, %1, 0"
++ [(set_attr "length" "2,4")
+ (set_attr "cc" "set_vncz")])
+
++(define_insn "negsi2_predicable"
++ [(set (match_operand:SI 0 "register_operand" "+r")
++ (neg:SI (match_dup 0)))]
++ "TARGET_V2_INSNS"
++ "rsub%?\t%0, 0"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")])
++
+;;=============================================================================
+;; abs
+;;-----------------------------------------------------------------------------
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (abs:SI (match_operand:SI 1 "register_operand" "0")))]
+ ""
-+ "abs %0"
++ "abs\t%0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "set_z")])
+
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
-+ (not:SI (match_operand:SI 1 "register_operand" "r,0")))]
++ (not:SI (match_operand:SI 1 "register_operand" "0,r")))]
+ ""
+ "@
-+ rsub %0, %1, -1
-+ com %0"
-+ [(set_attr "length" "4,2")
++ com\t%0
++ rsub\t%0, %1, -1"
++ [(set_attr "length" "2,4")
+ (set_attr "cc" "set_z")])
+
+
++(define_insn "one_cmplsi2_predicable"
++ [(set (match_operand:SI 0 "register_operand" "+r")
++ (not:SI (match_dup 0)))]
++ "TARGET_V2_INSNS"
++ "rsub%?\t%0, -1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")])
++
++
+;;=============================================================================
+;; Bit load
+;;-----------------------------------------------------------------------------
+;; Compare reg0 with reg1 or an immediate value.
+;;=============================================================================
+
-+(define_expand "cmpqi"
-+ [(set (cc0)
-+ (compare:QI
-+ (match_operand:QI 0 "general_operand" "")
-+ (match_operand:QI 1 "general_operand" "")))]
-+ ""
-+ "{
-+
-+ if ( GET_CODE(operands[0]) != REG
-+ && GET_CODE(operands[0]) != SUBREG)
-+ operands[0] = force_reg(QImode, operands[0]);
-+
-+
-+ if ( GET_CODE(operands[1]) != REG
-+ && GET_CODE(operands[1]) != SUBREG )
-+ operands[1] = force_reg(QImode, operands[1]);
-+
-+ avr32_compare_op0 = operands[0];
-+ avr32_compare_op1 = operands[1];
-+ emit_insn(gen_cmpqi_internal(operands[0], operands[1]));
-+ DONE;
-+ }"
-+)
-+
-+
-+(define_insn "cmpqi_internal"
-+ [(set (cc0)
-+ (compare:QI
-+ (match_operand:QI 0 "register_operand" "r")
-+ (match_operand:QI 1 "register_operand" "r")))]
-+ ""
-+ {
-+ set_next_insn_cond(insn,
-+ avr32_output_cmp(get_next_insn_cond(insn), QImode, operands[0], operands[1]));
-+ return "";
-+ }
-+ [(set_attr "length" "4")
-+ (set_attr "cc" "compare")])
-+
-+(define_expand "cmphi"
++(define_expand "cmp<mode>"
+ [(set (cc0)
-+ (compare:HI
-+ (match_operand:HI 0 "register_operand" "")
-+ (match_operand:HI 1 "register_operand" "")))]
++ (compare:CMP
++ (match_operand:CMP 0 "register_operand" "")
++ (match_operand:CMP 1 "<CMP:cmp_predicate>" "")))]
+ ""
+ "{
-+
-+ //if ( (GET_CODE(operands[0]) == REG
-+ // || GET_CODE(operands[0]) == SUBREG)
-+ // && (GET_CODE(operands[1]) == CONST_INT
-+ // && avr32_const_ok_for_constraint_p (INTVAL(operands[1]), 'K', \"Ks21\")) ){
-+ // operands[0] = convert_to_mode(SImode, operands[0], 0);
-+ // avr32_compare_op0 = operands[0];
-+ // avr32_compare_op1 = operands[1];
-+ // emit_insn(gen_cmpsi_internal(operands[0], operands[1]));
-+ // DONE;
-+ //}
-+
-+ if ( GET_CODE(operands[0]) != REG
-+ && GET_CODE(operands[0]) != SUBREG )
-+ operands[0] = force_reg(HImode, operands[0]);
-+
-+
-+ if ( GET_CODE(operands[1]) != REG
-+ && GET_CODE(operands[1]) != SUBREG)
-+ operands[1] = force_reg(HImode, operands[1]);
-+
+ avr32_compare_op0 = operands[0];
+ avr32_compare_op1 = operands[1];
+ }"
+)
+
-+
-+(define_insn "cmphi_internal"
++(define_insn "cmp<mode>_internal"
+ [(set (cc0)
-+ (compare:HI
-+ (match_operand:HI 0 "register_operand" "r")
-+ (match_operand:HI 1 "register_operand" "r")))]
++ (compare:CMP
++ (match_operand:CMP 0 "register_operand" "r")
++ (match_operand:CMP 1 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")))]
+ ""
+ {
+ set_next_insn_cond(insn,
-+ avr32_output_cmp(get_next_insn_cond(insn), HImode, operands[0], operands[1]));
++ avr32_output_cmp(get_next_insn_cond(insn), GET_MODE (operands[0]), operands[0], operands[1]));
+ return "";
+ }
+ [(set_attr "length" "4")
+ (set_attr "cc" "compare")])
+
+
-+(define_expand "cmpsi"
-+ [(set (cc0)
-+ (compare:SI
-+ (match_operand:SI 0 "register_operand" "")
-+ (match_operand:SI 1 "register_immediate_operand" "")))]
-+ ""
-+ "{
-+ if ( GET_CODE(operands[0]) != REG
-+ && GET_CODE(operands[0]) != SUBREG )
-+ operands[0] = force_reg(SImode, operands[0]);
-+
-+ if ( GET_CODE(operands[1]) != REG
-+ && GET_CODE(operands[1]) != SUBREG
-+ && GET_CODE(operands[1]) != CONST_INT )
-+ operands[1] = force_reg(SImode, operands[1]);
-+
-+ avr32_compare_op0 = operands[0];
-+ avr32_compare_op1 = operands[1];
-+
-+ }"
-+)
-+
-+
-+(define_insn "cmpsi_internal"
-+ [(set (cc0)
-+ (compare:SI
-+ (match_operand:SI 0 "register_operand" "r, r, r")
-+ (match_operand:SI 1 "register_immediate_operand" "r, Ks06, Ks21")))]
-+ ""
-+ {
-+ set_next_insn_cond(insn,
-+ avr32_output_cmp(get_next_insn_cond(insn), SImode, operands[0], operands[1]));
-+ return "";
-+ }
-+
-+ [(set_attr "length" "2,2,4")
-+ (set_attr "cc" "compare")])
-+
-+
-+(define_expand "cmpdi"
-+ [(set (cc0)
-+ (compare:DI
-+ (match_operand:DI 0 "register_operand" "")
-+ (match_operand:DI 1 "register_immediate_operand" "")))]
-+ ""
-+ {
-+ avr32_compare_op0 = operands[0];
-+ avr32_compare_op1 = operands[1];
-+ }
-+)
-+
-+(define_insn "cmpdi_internal"
-+ [(set (cc0)
-+ (compare:DI
-+ (match_operand:DI 0 "register_operand" "r")
-+ (match_operand:DI 1 "register_immediate_operand" "rKu20")))]
-+ ""
-+ {
-+ set_next_insn_cond(insn,
-+ avr32_output_cmp(get_next_insn_cond(insn), DImode, operands[0], operands[1]));
-+ return "";
-+ }
-+
-+ [(set_attr "length" "6")
-+ (set_attr "type" "alu2")
-+ (set_attr "cc" "compare")])
-+
-+
-+
-+;;=============================================================================
++;;=============================================================================
+;; Test if zero
+;;-----------------------------------------------------------------------------
+;; Compare reg against zero and set the condition codes.
+ (set_attr "type" "alu, alu, load_rm, load_rm")])
+
+
++;;=============================================================================
++;; Conditional load and extend insns
++;;=============================================================================
++(define_insn "ldsi<mode>_predicable_se"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (sign_extend:SI
++ (match_operand:INTM 1 "memory_operand" "<INTM:pred_mem_constraint>")))]
++ "TARGET_V2_INSNS"
++ "ld<INTM:load_postfix_s>%?\t%0, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "type" "load")
++ (set_attr "predicable" "yes")]
++)
++
++(define_insn "ldsi<mode>_predicable_ze"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (zero_extend:SI
++ (match_operand:INTM 1 "memory_operand" "<INTM:pred_mem_constraint>")))]
++ "TARGET_V2_INSNS"
++ "ld<INTM:load_postfix_u>%?\t%0, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "type" "load")
++ (set_attr "predicable" "yes")]
++)
++
++(define_insn "ldhi_predicable_ze"
++ [(set (match_operand:HI 0 "register_operand" "=r")
++ (zero_extend:HI
++ (match_operand:QI 1 "memory_operand" "RKs10")))]
++ "TARGET_V2_INSNS"
++ "ld.ub%?\t%0, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "type" "load")
++ (set_attr "predicable" "yes")]
++)
++
++(define_insn "ldhi_predicable_se"
++ [(set (match_operand:HI 0 "register_operand" "=r")
++ (sign_extend:HI
++ (match_operand:QI 1 "memory_operand" "RKs10")))]
++ "TARGET_V2_INSNS"
++ "ld.sb%?\t%0, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "type" "load")
++ (set_attr "predicable" "yes")]
++)
+
+;;=============================================================================
+;; Conditional set register
+ (set_attr "cc" "none")])
+
+(define_insn "smi"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (unspec:SI [(cc0)
-+ (const_int 0)] UNSPEC_COND_MI))]
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec:SI [(cc0)
++ (const_int 0)] UNSPEC_COND_MI))]
+ ""
+ "srmi\t%0"
+ [(set_attr "length" "2")
+ (match_operand:ADDCC 2 "register_operand" "")
+ (plus:ADDCC
+ (match_dup 2)
-+ (match_operand:ADDCC 3 "avr32_cond_immediate_operand" ""))))]
++ (match_operand:ADDCC 3 "" ""))))]
+ ""
+ {
++ if ( !(GET_CODE (operands[3]) == CONST_INT
++ || (TARGET_V2_INSNS && REG_P(operands[3]))) ){
++ FAIL;
++ }
++
+ /* Delete compare instruction as it is merged into this instruction */
+ remove_insn (get_last_insn_anywhere ());
+
+ operands[4] = avr32_compare_op0;
+ operands[5] = avr32_compare_op1;
++
++ if ( TARGET_V2_INSNS
++ && REG_P(operands[3])
++ && REGNO(operands[0]) != REGNO(operands[2]) ){
++ emit_move_insn (operands[0], operands[2]);
++ operands[2] = operands[0];
++ }
+ }
+ )
+
++(define_insn "add<ADDCC:mode>cc_cmp<CMP:mode>_reg"
++ [(set (match_operand:ADDCC 0 "register_operand" "=r")
++ (if_then_else:ADDCC (match_operator 1 "avr32_comparison_operator"
++ [(match_operand:CMP 4 "register_operand" "r")
++ (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")])
++ (match_dup 0)
++ (plus:ADDCC
++ (match_operand:ADDCC 2 "register_operand" "r")
++ (match_operand:ADDCC 3 "register_operand" "r"))))]
++ "TARGET_V2_INSNS"
++ {
++ operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
++ return "add%i1\t%0, %2, %3";
++ }
++ [(set_attr "length" "8")
++ (set_attr "cc" "cmp_cond_insn")])
+
+(define_insn "add<ADDCC:mode>cc_cmp<CMP:mode>"
+ [(set (match_operand:ADDCC 0 "register_operand" "=r")
+ ""
+ {
+ operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
-+
+ return "sub%i1\t%0, -%3";
+ }
+ [(set_attr "length" "8")
+ ""
+ {
+ operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
-+
++
+ switch( which_alternative ){
+ case 0:
+ return "mov%i1 %0, %3";
+ abort();
+ }
+
-+
+ }
+ [(set_attr "length" "8,8,12")
+ (set_attr "cc" "cmp_cond_insn")])
+
++
++
+
+;;=============================================================================
+;; jump
+ (set_attr "type" "call")]
+ )
+
++
+(define_insn "return_cond"
+ [(set (pc)
+ (if_then_else (match_operand 0 "avr32_comparison_operand" "")
+ "ret%0\tr12";
+ [(set_attr "type" "call")])
+
++(define_insn "return_cond_predicable"
++ [(return)]
++ "USE_RETURN_INSN (TRUE)"
++ "ret%?\tr12";
++ [(set_attr "type" "call")
++ (set_attr "predicable" "yes")])
++
+
+(define_insn "return_imm"
+ [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
+ (pc)))])]
+ "USE_RETURN_INSN (TRUE) &&
+ ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
-+ "ret%1\t%0";
-+ [(set_attr "type" "call")]
-+ )
++ "ret%1\t%0";
++ [(set_attr "type" "call")]
++ )
++
++(define_insn "return_imm_predicable"
++ [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
++ (use (reg RETVAL_REGNUM))
++ (return)])]
++ "USE_RETURN_INSN (TRUE) &&
++ ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
++ "ret%?\t%0";
++ [(set_attr "type" "call")
++ (set_attr "predicable" "yes")])
+
+(define_insn "return_<mode>reg"
+ [(set (reg RETVAL_REGNUM) (match_operand:MOVM 0 "register_operand" "r"))
+ (use (reg RETVAL_REGNUM))
+ (return)]
+ "USE_RETURN_INSN (TRUE)"
-+ "retal %0";
-+ [(set_attr "type" "call")]
-+ )
++ "ret%?\t%0";
++ [(set_attr "type" "call")
++ (set_attr "predicable" "yes")])
+
+(define_insn "return_<mode>reg_cond"
+ [(set (reg RETVAL_REGNUM) (match_operand:MOVM 0 "register_operand" "r"))
+ {
+ if ( TARGET_RETURN_STACK )
+ emit_insn ( gen_frs() );
++
+ avr32_load_pic_register ();
+ DONE;
+ }
+
+(define_insn "indirect_jump_internal"
+ [(set (pc)
-+ (match_operand:SI 0 "general_operand" "r,m,W"))]
++ (match_operand:SI 0 "avr32_non_rmw_general_operand" "r,m,W"))]
+ ""
+ {
+ switch( which_alternative ){
+ (set_attr "cc" "none,none,clobber")])
+
+
++
+;;=============================================================================
+;; casesi and tablejump
+;;=============================================================================
+ "add\tpc, %0, %1 << %p2"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
-+
++
+(define_insn "tablejump_insn"
+ [(set (pc) (match_operand:SI 0 "memory_operand" "m"))
+ (use (label_ref (match_operand 1 "" "")))]
+ rtx table_label = operands[3];
+ rtx oor_label = operands[4];
+
++ index = force_reg ( SImode, index );
+ if (low_bound != const0_rtx)
+ {
+ if (!avr32_const_ok_for_constraint_p(INTVAL (low_bound), 'I', \"Is21\")){
+ emit_insn (gen_addsi3 (reg, index,
+ GEN_INT (-INTVAL (low_bound))));
+ }
-+ index = reg;
++ index = reg;
+ }
+
+ if (!avr32_const_ok_for_constraint_p (INTVAL (range), 'K', \"Ks21\"))
+
+
+(define_insn "prefetch"
-+ [(prefetch (match_operand:SI 0 "register_operand" "r")
++ [(prefetch (match_operand:SI 0 "avr32_ks16_address_operand" "p")
+ (match_operand 1 "const_int_operand" "")
+ (match_operand 2 "const_int_operand" ""))]
+ ""
+ {
-+ return "pref\t%0[0]";
++ return "pref\t%0";
+ }
+
+ [(set_attr "length" "4")
+ [(unspec_volatile [(match_operand:SI 0 "register_operand" "r")]
+ VUNSPEC_EH_RETURN)
+ (clobber (match_scratch:SI 1 "=&r"))]
-+ ""
++ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+ }"
+ )
+
++
+;;=============================================================================
+;; ffssi2
+;;-----------------------------------------------------------------------------
+
+(define_insn_and_split "bswap_32"
+ [ (set (match_operand:SI 0 "avr32_bswap_operand" "=r,RKs14,r")
-+ (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_operand:SI 1 "avr32_bswap_operand" "=r,r,RKs14")
-+ (const_int 4278190080))
++ (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_operand:SI 1 "avr32_bswap_operand" "r,r,RKs14")
++ (const_int -16777216))
+ (const_int 24))
+ (lshiftrt:SI (and:SI (match_dup 1)
+ (const_int 16711680))
+ if ( REGNO(operands[0]) == REGNO(operands[1]))
+ return "swap.b\t%0";
+ else
-+ return "mov\t%0, %1\;swap.b\t%0";
++ return "#";
+ case 1:
+ return "stswp.w\t%0, %1";
+ case 2:
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0)
+ (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_dup 0)
-+ (const_int 4278190080))
++ (const_int -16777216))
+ (const_int 24))
+ (lshiftrt:SI (and:SI (match_dup 0)
+ (const_int 16711680))
+;; cache instructions
+;;-----------------------------------------------------------------------------
+(define_insn "cache"
-+ [ (unspec_volatile [(match_operand:SI 0 "register_operand" "r")
++ [ (unspec_volatile [(match_operand:SI 0 "avr32_ks11_address_operand" "p")
+ (match_operand:SI 1 "immediate_operand" "Ku05")] VUNSPEC_CACHE)]
+ ""
-+ "cache %0[0], %1"
++ "cache %0, %1"
+ [(set_attr "length" "4")]
+ )
+
+ [(set_attr "length" "2")]
+ )
+
++(define_insn "ssrf"
++ [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku05")] VUNSPEC_SSRF)]
++ ""
++ "ssrf %0"
++ [(set_attr "length" "2")
++ (set_attr "cc" "clobber")]
++ )
++
++(define_insn "csrf"
++ [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku05")] VUNSPEC_CSRF)]
++ ""
++ "csrf %0"
++ [(set_attr "length" "2")
++ (set_attr "cc" "clobber")]
++ )
++
+;;=============================================================================
+;; Flush Return Stack instruction
+;;-----------------------------------------------------------------------------
+;;-----------------------------------------------------------------------------
+;; Changing
+;; mul rd, rx, ry
-+;; add rd2, rd
++;; add rd2, rd
++;; or
++;; add rd2, rd, rd2
+;; to
+;; mac rd2, rx, ry
+;;=============================================================================
+ (match_dup 3)))]
+ "")
+
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (mult:SI (match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "register_operand" "")))
++ (set (match_operand:SI 3 "register_operand" "")
++ (plus:SI (match_dup 0)
++ (match_dup 3)))]
++ "peep2_reg_dead_p(2, operands[0])"
++ [(set (match_dup 3)
++ (plus:SI (mult:SI (match_dup 1)
++ (match_dup 2))
++ (match_dup 3)))]
++ "")
+
+
+;;=============================================================================
+;;=================================================================
+
+(define_insn "*round"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (ashiftrt:SI (plus:SI (match_operand:SI 1 "register_operand" "0")
-+ (match_operand:SI 2 "immediate_operand" "i"))
-+ (match_operand:SI 3 "immediate_operand" "i")))]
-+ "avr32_rnd_operands(operands[2], operands[3])"
++ [(set (match_operand:SI 0 "register_operand" "+r")
++ (ashiftrt:SI (plus:SI (match_dup 0)
++ (match_operand:SI 1 "immediate_operand" "i"))
++ (match_operand:SI 2 "immediate_operand" "i")))]
++ "avr32_rnd_operands(operands[1], operands[2])"
+
-+ "satrnds %0 >> %3, 31"
++ "satrnds %0 >> %2, 31"
+
+ [(set_attr "type" "alu_sat")
+ (set_attr "length" "4")]
+ [(set_attr "length" "10")
+ (set_attr "cc" "none")
+ (set_attr "type" "call")])
++
+
+
+;;=================================================================
+;; Load the SIMD description
+(include "simd.md")
+
-+;; Load the FP coprocessor patterns
++;; Load the FP coprocessor patterns
+(include "fpcp.md")
-Index: gcc-4.2.3/gcc/config/avr32/avr32-modes.def
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/avr32-modes.def 2008-05-21 13:45:54.165287871 +0200
+--- /dev/null
++++ b/gcc/config/avr32/avr32-modes.def
@@ -0,0 +1 @@
+VECTOR_MODES (INT, 4); /* V4QI V2HI */
-Index: gcc-4.2.3/gcc/config/avr32/avr32.opt
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/avr32.opt 2008-05-21 13:45:54.165287871 +0200
-@@ -0,0 +1,73 @@
+--- /dev/null
++++ b/gcc/config/avr32/avr32.opt
+@@ -0,0 +1,86 @@
+; Options for the ATMEL AVR32 port of the compiler.
+
+; Copyright 2007 Atmel Corporation.
+
+muse-rodata-section
+Target Report Mask(USE_RODATA_SECTION)
-+Do not put readonly-data in .text section, but in .rodata.
++Use section .rodata for read-only data instead of .text.
+
+mhard-float
+Target Report Undocumented Mask(HARD_FLOAT)
+Target Report Undocumented InverseMask(HARD_FLOAT, SOFT_FLOAT)
+Use software floating-point library for floating-point operations.
+
-+force-double-align
++mforce-double-align
+Target Report RejectNegative Mask(FORCE_DOUBLE_ALIGN)
+Force double-word alignment for double-word memory accesses.
+
+Target Report Var(avr32_imm_in_const_pool) Init(-1)
+Put large immediates in constant pool. This is enabled by default for archs with insn-cache.
+
-Index: gcc-4.2.3/gcc/config/avr32/avr32-protos.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/avr32-protos.h 2008-05-21 13:45:54.165287871 +0200
-@@ -0,0 +1,185 @@
++mno-pic
++Target Report RejectNegative Mask(NO_PIC)
++Do not generate position-independent code. (deprecated, use -fno-pic instead)
++
++mcond-exec-before-reload
++Target Report Undocumented Mask(COND_EXEC_BEFORE_RELOAD)
++Enable experimental conditional execution preparation before the reload stage.
++
++mrmw-addressable-data
++Target Report Mask(RMW_ADDRESSABLE_DATA)
++Signal that all data is in range for the Atomic Read-Modify-Write memory instructions, and that
++gcc can safely generate these whenever possible.
++
+--- /dev/null
++++ b/gcc/config/avr32/avr32-protos.h
+@@ -0,0 +1,196 @@
+/*
+ Prototypes for exported functions defined in avr32.c
+ Copyright 2003-2006 Atmel Corporation.
+int set_next_insn_cond (rtx cur_insn, rtx cond);
+void avr32_override_options (void);
+void avr32_load_pic_register (void);
++#ifdef GCC_BASIC_BLOCK_H
++rtx avr32_ifcvt_modify_insn (ce_if_block_t *ce_info, rtx pattern, rtx insn,
++ int *num_true_changes);
++rtx avr32_ifcvt_modify_test (ce_if_block_t *ce_info, rtx test );
++void avr32_ifcvt_modify_cancel ( ce_if_block_t *ce_info, int *num_true_changes);
++#endif
+void avr32_optimization_options (int level, int size);
++int avr32_const_ok_for_move (HOST_WIDE_INT c);
++
+void avr32_split_const_expr (enum machine_mode mode,
+ enum machine_mode new_mode,
+ rtx expr,
+ rtx const_expr,
+ HOST_WIDE_INT *val);
+
++int avr32_cond_imm_clobber_splittable (rtx insn,
++ rtx operands[]);
++
+
+#endif /* AVR32_PROTOS_H */
-Index: gcc-4.2.3/gcc/config/avr32/crti.asm
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/crti.asm 2008-05-21 13:45:54.165287871 +0200
+--- /dev/null
++++ b/gcc/config/avr32/crti.asm
@@ -0,0 +1,64 @@
+/*
+ Init/fini stuff for AVR32.
+1: .long 0b - _GLOBAL_OFFSET_TABLE_
+2:
+
-Index: gcc-4.2.3/gcc/config/avr32/crtn.asm
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/crtn.asm 2008-05-21 13:45:54.165287871 +0200
+--- /dev/null
++++ b/gcc/config/avr32/crtn.asm
@@ -0,0 +1,44 @@
+/* Copyright (C) 2001 Free Software Foundation, Inc.
+ Written By Nick Clifton
+ .section ".fini"
+ ldm sp++, r6, pc
+
-Index: gcc-4.2.3/gcc/config/avr32/fpcp.md
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/fpcp.md 2008-05-21 13:45:54.169287542 +0200
+--- /dev/null
++++ b/gcc/config/avr32/fpcp.md
@@ -0,0 +1,551 @@
+;; AVR32 machine description file for Floating-Point instructions.
+;; Copyright 2003-2006 Atmel Corporation.
+ [(set_attr "type" "fstm")
+ (set_attr "length" "4")
+ (set_attr "cc" "none")])
-Index: gcc-4.2.3/gcc/config/avr32/lib1funcs.S
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/lib1funcs.S 2008-05-21 13:45:54.173288052 +0200
-@@ -0,0 +1,2589 @@
-+
+--- /dev/null
++++ b/gcc/config/avr32/lib1funcs.S
+@@ -0,0 +1,2874 @@
++/* Macro for moving immediate value to register. */
++.macro mov_imm reg, imm
++.if (((\imm & 0xfffff) == \imm) || ((\imm | 0xfff00000) == \imm))
++ mov \reg, \imm
++#if __AVR32_UC__ >= 2
++.elseif ((\imm & 0xffff) == 0)
++ movh \reg, hi(\imm)
++
++#endif
++.else
++ mov \reg, lo(\imm)
++ orh \reg, hi(\imm)
++.endif
++.endm
++
++
++
+/* Adjust the unpacked double number if it is a subnormal number.
+ The exponent and mantissa pair are stored
+ in [mant_hi,mant_lo] and [exp]. A register with the correct sign bit in
+ subcs \exp, -2 /* Adjust exponent if we overflowed */
+.endm
+
-+/* Scale mantissa [mant_hi, mant_lo] with amount [shift_count].
-+ Uses scratch registers [scratch1] and [scratch2] */
-+.macro scale_df shift_count, mant_lo, mant_hi, scratch1, scratch2
-+ /* Scale [mant_hi, mant_lo] with shift_amount.
-+ Must not forget the sticky bits we intend to shift out. */
-+
-+ rsub \scratch1,\shift_count,32/* get (32 - shift count)
-+ (if shift count > 32 we get a
-+ negative value, but that will
-+ work as well in the code below.) */
-+
-+ cp.w \shift_count,32 /* handle shifts >= 32 separately */
-+ brhs 70f
-+
-+ /* small (<32) shift amount, both words are part of the shift
-+ first remember whether part that is lost contains any 1 bits ... */
-+ lsl \scratch2,\mant_lo,\scratch1 /*shift away bits that are part of
-+ final mantissa. only part that goes
-+ to scratch2 are bits that will be lost */
-+
-+ /* ... and now to the actual shift */
-+ lsl \scratch1,\mant_hi,\scratch1 /* get bits from msw destined for lsw*/
-+ lsr \mant_lo,\mant_lo,\shift_count /* shift down lsw of mantissa */
-+ lsr \mant_hi,\mant_hi,\shift_count /* shift down msw of mantissa */
-+ or \mant_lo,\scratch1 /* combine these bits with prepared lsw*/
-+ rjmp 71f
-+
-+ /* large (>=32) shift amount, only lsw will have bits left after shift.
-+ note that shift operations will use ((shift count) mod 32) so
-+ we do not need to subtract 32 from shift count. */
-+70:
-+ /* first remember whether part that is lost contains any 1 bits ... */
-+ lsl \scratch2,\mant_hi,\scratch1 /* save all lost bits from msw */
-+ or \scratch2,\mant_lo /* also save lost bits (all) from lsw
-+ now scratch2<>0 if we lose any bits */
-+
-+ /* ... and now to the actual shift */
-+ mov \mant_lo,\mant_hi /* msw -> lsw (i.e. "shift 32 first")*/
-+ mov \mant_hi,0 /* clear msw */
-+ lsr \mant_lo,\mant_lo,\shift_count /* make rest of shift inside lsw*/
-+
-+71:
-+ cp.w \scratch2,0 /* if any '1' bit in part we lost ...*/
-+ breq 70f
-+
-+ sbr \mant_lo,0 /* ... we need to set sticky bit*/
-+70:
-+.endm
-+
-+/* Unpack exponent and mantissa from the double number
-+ stored in [df_hi,df_lo]. The exponent is stored in [exp]
-+ while the mantissa is stored in [df_hi,df_lo]. */
-+
-+.macro unpack_df exp, df_lo, df_hi
-+ lsr \exp, \df_hi,21 /* Extract exponent */
-+ lsl \df_hi,10 /* Get mantissa */
-+ or \df_hi,\df_hi,\df_lo>>21
-+ lsl \df_lo,11
-+
-+ neg \exp /* Fix implicit bit */
-+ bst \df_hi,31
-+ subeq \exp,1
-+ neg \exp /* negate back exponent */
-+ .endm
-+
-+/* Unpack exponent and mantissa from the single float number
-+ stored in [sf]. The exponent is stored in [exp]
-+ while the mantissa is stored in [sf]. */
-+.macro unpack_sf exp, sf
-+ lsr \exp, \sf, 24
-+ brne 80f
-+ /* Fix subnormal number */
-+ lsl \sf,7
-+ clz \exp,\sf
-+ lsl \sf,\sf,\exp
-+ rsub \exp,\exp,1
-+ rjmp 81f
-+80:
-+ lsl \sf,7
-+ sbr \sf, 31 /*Implicit bit*/
-+81:
-+.endm
-+
-+
++
+
+/* Pack a single float number stored in [mant] and [exp]
+ into a single float number in [sf] */
+ clz \scratch1,\mant_lo /* shift mantissa */
+ movcs \scratch1, 0
+ subcc \scratch1,-32
-+ mov \mant_hi,\mant_lo
-+ lsl \mant_hi,\mant_hi,\scratch1
++ lsl \mant_hi,\mant_lo,\scratch1
+ mov \mant_lo,0
+ sub \exp,\scratch1 /* adjust exponent */
+80:
+.endm
+
+
-+
+
-+#ifdef L_avr32_f64_mul
++#if defined(L_avr32_f64_mul) || defined(L_avr32_f64_mul_fast)
+ .align 2
++#if defined(L_avr32_f64_mul)
+ .global __avr32_f64_mul
+ .type __avr32_f64_mul,@function
-+
-+
+__avr32_f64_mul:
++#else
++ .global __avr32_f64_mul_fast
++ .type __avr32_f64_mul_fast,@function
++__avr32_f64_mul_fast:
++#endif
+ or r12, r10, r11 << 1
+ breq __avr32_f64_mul_op1_zero
+
++#if defined(L_avr32_f64_mul)
++ pushm r4-r7, lr
++#else
+ stm --sp, r5,r6,r7,lr
++#endif
++
++#define AVR32_F64_MUL_OP1_INT_BITS 1
++#define AVR32_F64_MUL_OP2_INT_BITS 10
++#define AVR32_F64_MUL_RES_INT_BITS 11
++
+ /* op1 in {r11,r10}*/
+ /* op2 in {r9,r8}*/
+ eor lr, r11, r9 /* MSB(lr) = Sign(op1) ^ Sign(op2) */
+
-+ /* Unpack op1 */
++ /* Unpack op1 to 1.63 format*/
+ /* exp: r7 */
+ /* sf: r11, r10 */
-+ lsr r7, r11, 20 /* Extract exponent */
-+
-+ lsl r11, 11 /* Extract mantissa, leave room for implicit bit */
-+ or r11, r11, r10>>21
-+ lsl r10, 11
-+ sbr r11, 31 /* Insert implicit bit */
++ bfextu r7, r11, 20, 11 /* Extract exponent */
++
++ mov r5, 1
+
-+ cbr r7, 11 /* Clear sign bit */
+ /* Check if normalization is needed */
+ breq __avr32_f64_mul_op1_subnormal /*If number is subnormal, normalize it */
+
++ lsl r11, (12-AVR32_F64_MUL_OP1_INT_BITS-1) /* Extract mantissa, leave room for implicit bit */
++ or r11, r11, r10>>(32-(12-AVR32_F64_MUL_OP1_INT_BITS-1))
++ lsl r10, (12-AVR32_F64_MUL_OP1_INT_BITS-1)
++ bfins r11, r5, 32 - (1 + AVR32_F64_MUL_OP1_INT_BITS), 1 + AVR32_F64_MUL_OP1_INT_BITS /* Insert implicit bit */
++
++
+22:
-+ /* Unpack op2 */
++ /* Unpack op2 to 10.54 format */
+ /* exp: r6 */
+ /* sf: r9, r8 */
-+ lsr r6, r9, 20 /* Extract exponent */
-+
-+ lsl r9, 11 /* Extract mantissa, leave room for implicit bit */
-+ or r9, r9, r8>>21
-+ lsl r8, 11
-+ sbr r9, 31 /* Insert implicit bit */
++ bfextu r6, r9, 20, 11 /* Extract exponent */
+
-+ cbr r6, 11 /* Clear sign bit */
+ /* Check if normalization is needed */
+ breq __avr32_f64_mul_op2_subnormal /*If number is subnormal, normalize it */
++
++ lsl r8, 1 /* Extract mantissa, leave room for implicit bit */
++ rol r9
++ bfins r9, r5, 32 - (1 + AVR32_F64_MUL_OP2_INT_BITS), 1 + AVR32_F64_MUL_OP2_INT_BITS /* Insert implicit bit */
++
+23:
+
+ /* Check if any operands are NaN or INF */
+ add r12, r7, r6
+ sub r12, (1023-1)
+
++#if defined(L_avr32_f64_mul)
++ /* Do the multiplication.
++ Place result in [r11, r10, r7, r6]. The result is in 11.117 format. */
++ mulu.d r4, r11, r8
++ macu.d r4, r10, r9
++ mulu.d r6, r10, r8
++ mulu.d r10, r11, r9
++ add r7, r4
++ adc r10, r10, r5
++ acr r11
++#else
++	/* Do the multiplication using approximate calculation. Discard the al x bl
++	   calculation.
++           Place result in [r11, r10, r7]. The result is in 11.85 format. */
++
+ /* Do the multiplication using approximate calculation.
+ Place result in r11, r10. Use r7, r6 as scratch registers */
-+ mul_approx_df r11 /*ah*/, r10 /*al*/, r9 /*bh*/, r8 /*bl*/, r11 /*rh*/, r10 /*rl*/, r7 /*sh*/, r6 /*sl*/
-+
-+ /* Check if result is zero */
-+ breq __avr32_f64_mul_res_subnormal
-+
++ mulu.d r6, r11, r8
++ macu.d r6, r10, r9
++ mulu.d r10, r11, r9
++ add r10, r7
++ acr r11
++#endif
+ /* Adjust exponent and mantissa */
-+ /* [r12]:exp, [r11, r10]:mant, [r9,r8,r7]:scratch*/
-+ /* Mantissa may be of the format 0.xxxx or 1.xxxx. */
++ /* [r12]:exp, [r11, r10]:mant [r7, r6]:sticky bits */
++ /* Mantissa may be of the format 00000000000.0xxx or 00000000000.1xxx. */
+ /* In the first case, shift one pos to left.*/
-+ sub r9, r12, 1
-+ mov r8, r11
-+ lsl r7, r10, 1
-+ rol r8
-+ bld r11, 31
-+ movne r12, r9
-+ movne r11, r8
-+ movne r10, r7
++ bld r11, 32-AVR32_F64_MUL_RES_INT_BITS-1
++ breq 0f
++ lsl r7, 1
++ rol r10
++ rol r11
++ sub r12, 1
++0:
+ cp r12, 0
-+ breq __avr32_f64_mul_res_subnormal /*Result was subnormal. Flush-to-zero and return zero*/
++ brle __avr32_f64_mul_res_subnormal /*Result was subnormal.*/
+
+ /* Check for Inf. */
+ cp.w r12, 0x7ff
+ brge __avr32_f64_mul_res_inf
-+
++
++ /* Insert exponent. */
++ bfins r11, r12, 20, 11
++
+ /* Result was not subnormal. Perform rounding. */
-+ /* Because of performance optimization, we have no sticky bit, */
-+ /* so round-to-even won't work as specified in the IEEE standard.*/
-+ /* [r12]:exp, [r11, r10]:mant */
-+ /* Mantissa is in 0.64 format. Round by adding 1<<(64-(52+2))=1<<10*/
-+ /* That is, 1 in the MSB of the part that will be discarded in final packing.*/
-+ mov r9, (1<<10)
-+ add r10, r9
-+ acr r11
-+ /* Adjust exponent if we overflowed.*/
-+ subcs r12, -1
-+
++        /* For the fast version we discard the sticky bits and always round
++	   the halfway case up. */
++24:
++#if defined(L_avr32_f64_mul)
++ or r6, r6, r10 << 31 /* Or in parity bit into stickybits */
++ or r7, r7, r6 >> 1 /* Or together sticky and still make the msb
++ of r7 represent the halfway bit. */
++ eorh r7, 0x8000 /* Toggle halfway bit. */
++ /* We should now round up by adding one for the following cases:
++
++ halfway sticky|parity round-up
++ 0 x no
++ 1 0 no
++ 1 1 yes
++
++ Since we have inverted the halfway bit we can use the satu instruction
++ by saturating to 1 bit to implement this.
++ */
++ satu r7 >> 0, 1
++#else
++ lsr r7, 31
++#endif
++ add r10, r7
++ acr r11
+
-+ /* Pack final result*/
-+ /* Input: [r12]:exp, [r11, r10]:mant */
-+ /* Result in [r11,r10] */
-+ /* Insert mantissa */
-+ cbr r11, 31 /*Clear implicit bit*/
-+ lsr r10, 11
-+ or r10, r10, r11<<21
-+ lsr r11, 11
-+ /* Insert exponent and sign bit*/
-+ or r11, r11, r12<<20
++ /* Insert sign bit*/
+ bld lr, 31
+ bst r11, 31
+
+ /* Return result in [r11,r10] */
++#if defined(L_avr32_f64_mul)
++ popm r4-r7, pc
++#else
+ ldm sp++, r5, r6, r7,pc
++#endif
+
+
+__avr32_f64_mul_op1_subnormal:
-+ cbr r11, 31 /* Clear implicit bit. */
-+ normalize_df r7 /*exp*/, r10, r11 /*Mantissa*/, r5, r12 /*scratch*/
++ andh r11, 0x000f /* Remove sign bit and exponent */
++ clz r12, r10 /* Count leading zeros in lsw */
++ clz r6, r11 /* Count leading zeros in msw */
++ subcs r12, -32 + AVR32_F64_MUL_OP1_INT_BITS
++ movcs r6, r12
++ subcc r6, AVR32_F64_MUL_OP1_INT_BITS
++ cp.w r6, 32
++ brge 0f
++
++ /* shifting involves both msw and lsw*/
++ rsub r12, r6, 32 /* shift mantissa */
++ lsl r11, r11, r6
++ lsr r12, r10, r12
++ or r11, r12
++ lsl r10, r10, r6
++ sub r6, 12-AVR32_F64_MUL_OP1_INT_BITS
++ sub r7, r6 /* adjust exponent */
++ rjmp 22b /* Finished */
++0:
++ /* msw is zero so only need to consider lsw */
++ lsl r11, r10, r6
++ breq __avr32_f64_mul_res_zero
++ mov r10, 0
++ sub r6, 12-AVR32_F64_MUL_OP1_INT_BITS
++ sub r7, r6 /* adjust exponent */
+ rjmp 22b
++
+
+__avr32_f64_mul_op2_subnormal:
-+ cbr r9, 31 /* Clear implicit bit. */
-+ normalize_df r6 /*exp*/, r8, r9 /*Mantissa*/, r5, r12 /*scratch*/
++ andh r9, 0x000f /* Remove sign bit and exponent */
++ clz r12, r8 /* Count leading zeros in lsw */
++ clz r5, r9 /* Count leading zeros in msw */
++ subcs r12, -32 + AVR32_F64_MUL_OP2_INT_BITS
++ movcs r5, r12
++ subcc r5, AVR32_F64_MUL_OP2_INT_BITS
++ cp.w r5, 32
++ brge 0f
++
++ /* shifting involves both msw and lsw*/
++ rsub r12, r5, 32 /* shift mantissa */
++ lsl r9, r9, r5
++ lsr r12, r8, r12
++ or r9, r12
++ lsl r8, r8, r5
++ sub r5, 12 - AVR32_F64_MUL_OP2_INT_BITS
++ sub r6, r5 /* adjust exponent */
++ rjmp 23b /* Finished */
++0:
++ /* msw is zero so only need to consider lsw */
++ lsl r9, r8, r5
++ breq __avr32_f64_mul_res_zero
++ mov r8, 0
++ sub r5, 12 - AVR32_F64_MUL_OP2_INT_BITS
++ sub r6, r5 /* adjust exponent */
+ rjmp 23b
+
+
+__avr32_f64_mul_op_nan_or_inf:
+ /* Same code for OP1 and OP2*/
+ /* Since we are here, at least one of the OPs were NaN or INF*/
-+ /* Shift out implicit bit of both operands' mantissa */
-+ lsl r11, 1
-+ lsl r9, 1
++ andh r9, 0x000f /* Remove sign bit and exponent */
++ andh r11, 0x000f /* Remove sign bit and exponent */
+ /* Merge the regs in each operand to check for zero*/
+ or r11, r10 /* op1 */
+ or r9, r8 /* op2 */
+ breq __avr32_f64_mul_res_inf /*op2 was INF, return INF*/
+ rjmp __avr32_f64_mul_res_nan /*else return NaN*/
+
-+__avr32_f64_mul_res_subnormal:/* Multiply result was subnormal. Return zero. */
++__avr32_f64_mul_res_subnormal:/* Multiply result was subnormal. */
++#if defined(L_avr32_f64_mul)
++ /* Check how much we must scale down the mantissa. */
++ neg r12
++ sub r12, -1 /* We no longer have an implicit bit. */
++ satu r12 >> 0, 6 /* Saturate shift amount to max 63. */
++ cp.w r12, 32
++ brge 0f
++ /* Shift amount <32 */
++ rsub r8, r12, 32
++ or r6, r7
++ lsr r7, r7, r12
++ lsl r9, r10, r8
++ or r7, r9
++ lsr r10, r10, r12
++ lsl r9, r11, r8
++ or r10, r9
++ lsr r11, r11, r12
++ rjmp 24b
++0:
++ /* Shift amount >=32 */
++ rsub r8, r12, 32
++ moveq r9, 0
++ breq 0f
++ lsl r9, r11, r8
++0:
++ or r6, r7
++ or r6, r6, r10 << 1
++ lsr r10, r10, r12
++ or r7, r9, r10
++ lsr r10, r11, r12
++ mov r11, 0
++ rjmp 24b
++#else
++ /* Flush to zero for the fast version. */
++ mov r11, lr /*Get correct sign*/
++ andh r11, 0x8000, COH
++ mov r10, 0
++ ldm sp++, r5, r6, r7,pc
++#endif
++
++__avr32_f64_mul_res_zero:/* Multiply result is zero. */
+ mov r11, lr /*Get correct sign*/
+ andh r11, 0x8000, COH
+ mov r10, 0
++#if defined(L_avr32_f64_mul)
++ popm r4-r7, pc
++#else
+ ldm sp++, r5, r6, r7,pc
++#endif
+
+__avr32_f64_mul_res_nan: /* Return NaN. */
+ mov r11, -1
+ mov r10, -1
++#if defined(L_avr32_f64_mul)
++ popm r4-r7, pc
++#else
+ ldm sp++, r5, r6, r7,pc
++#endif
+
+__avr32_f64_mul_res_inf: /* Return INF. */
-+ mov r11, 0
-+ orh r11, 0x7ff0
++ mov r11, 0xfff00000
+ bld lr, 31
+ bst r11, 31
+ mov r10, 0
++#if defined(L_avr32_f64_mul)
++ popm r4-r7, pc
++#else
+ ldm sp++, r5, r6, r7,pc
++#endif
+
+__avr32_f64_mul_op1_zero:
+ /* Get sign */
+ cbr r11, 31
+ cbr r9, 31
+
-+ /* Put the number with the largest exponent in [r11, r10]
-+ and the number with the smallest exponent in [r9, r8] */
-+ cp r11, r9
++ /* Put the largest number in [r11, r10]
++ and the smallest number in [r9, r8] */
++ cp r10, r8
++ cpc r11, r9
+ brhs 1f /* Skip swap if operands already correctly ordered*/
+ /* Operands were not correctly ordered, swap them*/
+ mov r7, r11
+ /* exp: r7 */
+ /* sf: r11, r10 */
+ lsr r7, r11, 20 /* Extract exponent */
-+ cbr r7, 11 /* Clear sign bit */
+ lsl r11, 11 /* Extract mantissa, leave room for implicit bit */
+ or r11, r11, r10>>21
+ lsl r10, 11
+ /* exp: r6 */
+ /* sf: r9, r8 */
+ lsr r6, r9, 20 /* Extract exponent */
-+ cbr r6, 11 /* Clear sign bit */
+ breq __avr32_f64_sub_opL_subnormal /* If either zero or subnormal */
+ lsl r9, 11 /* Extract mantissa, leave room for implicit bit */
+ or r9, r9, r8>>21
+ breq __avr32_f64_sub_longnormalize_done /* No need for scaling if no zeros in high bits */
+ brcs __avr32_f64_sub_longnormalize
+
++
+ /* shift amount is smaller than 32, and involves both msw and lsw*/
+ rsub lr,r6,32 /* shift mantissa */
+ lsl r11,r11,r6
+ lsr lr,r10,lr
+ or r11,lr
+ lsl r10,r10,r6
++
+ sub r7,r6 /* adjust exponent */
+ brle __avr32_f64_sub_subnormal_result
-+
+__avr32_f64_sub_longnormalize_done:
+
+#if defined(L_avr32_f64_addsub)
+ /* Input: [r7]:exp, [r11, r10]:mant, [r12]:sign in MSB */
+ /* Result in [r11,r10] */
+ /* Insert mantissa */
-+ cbr r11, 31 /*Clear implicit bit*/
+ lsr r10, 11
+ or r10, r10, r11<<21
+ lsr r11, 11
+ /* Insert exponent and sign bit*/
-+ or r11, r11, r7<<20
-+ bld r12, 31
-+ bst r11, 31
++ bfins r11, r7, 20, 11
++ or r11, r12
+
+ /* Round */
+__avr32_f64_sub_round:
+#if defined(L_avr32_f64_addsub)
-+ mov r7, 0
-+ sbr r7, 31
++ mov_imm r7, 0x80000000
+ bld r10, 0
+ subne r7, -1
+
+ /* Set exponent to 1 if we do not have a zero. */
+ or lr, r9, r8
+ movne r6,1
-+
++
+ /* Check if opH is also subnormal. If so, clear implicit bit in r11*/
+ rsub lr, r7, 0
+ moveq r7,1
+ bst r11, 31
-+
++
+ /* Check if op1 is zero, if so set exponent to 0. */
+ or lr, r11, r10
+ moveq r7,0
-+
++
+ rjmp __avr32_f64_sub_opL_subnormal_done
+
+__avr32_f64_sub_opH_nan_or_inf:
+ /* opH is Inf. */
+ /* Check if opL is Inf. or NaN */
+ cp.w r6, 0x7ff
-+ breq __avr32_f64_sub_opL_nan_or_inf
++ breq __avr32_f64_sub_return_nan
++ /* Return infinity with correct sign. */
++ or r11, r12, r7 << 20
+ ldm sp++, r5, r6, r7, pc/* opL not Inf or NaN, return opH */
-+__avr32_f64_sub_opL_nan_or_inf:
-+ cbr r9, 31
-+ or lr, r9, r8
-+ brne __avr32_f64_sub_return_nan
-+ mov r10, 0 /* Generate Inf in r11, r10 */
-+ mov r11, 0
-+ orh r11, 0x7ff0
-+ ldm sp++, r5, r6, r7, pc/* opL Inf, return Inf */
+__avr32_f64_sub_return_nan:
+ mov r10, -1 /* Generate NaN in r11, r10 */
+ mov r11, -1
+
+
+__avr32_f64_sub_subnormal_result:
++#if defined(L_avr32_f64_addsub)
++ /* Check how much we must scale down the mantissa. */
++ neg r7
++ sub r7, -1 /* We no longer have an implicit bit. */
++ satu r7 >> 0, 6 /* Saturate shift amount to max 63. */
++ cp.w r7, 32
++ brge 0f
++ /* Shift amount <32 */
++ rsub r8, r7, 32
++ lsl r9, r10, r8
++ srne r6
++ lsr r10, r10, r7
++ or r10, r6 /* Sticky bit from the
++ part that was shifted out. */
++ lsl r9, r11, r8
++ or r10, r10, r9
++ lsr r11, r10, r7
++ /* Set exponent */
++ mov r7, 0
++ rjmp __avr32_f64_sub_longnormalize_done
++0:
++ /* Shift amount >=32 */
++ rsub r8, r7, 64
++ lsl r9, r11, r8
++ or r9, r10
++ srne r6
++ lsr r10, r11, r7
++ or r10, r6 /* Sticky bit from the
++ part that was shifted out. */
++ mov r11, 0
++ /* Set exponent */
++ mov r7, 0
++ rjmp __avr32_f64_sub_longnormalize_done
++#else
+ /* Just flush subnormals to zero. */
+ mov r10, 0
+ mov r11, 0
++#endif
+ ldm sp++, r5, r6, r7, pc
-+
+
+__avr32_f64_sub_longshift:
+ /* large (>=32) shift amount, only lsw will have bits left after shift.
+ /* Saturate the shift amount to 63. If the amount
+ is any larger op2 is insignificant. */
+ satu r6 >> 0, 6
++
+#if defined(L_avr32_f64_addsub)
+ /* first remember whether part that is lost contains any 1 bits ... */
++ moveq lr, r8 /* If shift amount is 32, no bits from msw are lost. */
++ breq 0f
+ lsl lr,r9,r5 /* save all lost bits from msw */
+ or lr,r8 /* also save lost bits (all) from lsw
+ now lr != 0 if we lose any bits */
+#endif
++0:
+ /* ... and now to the actual shift */
-+ mov r8,r9 /* msw -> lsw (i.e. "shift 32 first")*/
++ lsr r8,r9,r6 /* Move msw to lsw and shift. */
+ mov r9,0 /* clear msw */
-+ lsr r8,r8,r6 /* make rest of shift inside lsw*/
+#if defined(L_avr32_f64_addsub)
+ cp.w lr,0 /* if any '1' bit in part we lost ...*/
-+ sreq lr
-+ or r8, lr /* ... we need to set sticky bit*/
++ srne lr
++ or r8, lr /* ... we need to set sticky bit*/
+#endif
+ rjmp __avr32_f64_sub_shift_done
+
+ zero so force exponent to zero. */
+ movcs r7, 0
+ movcs r6, 0
++ movcs r12, 0 /* Also clear sign bit. A zero result from subtraction
++ always is +0.0 */
+ subcc r6,-32
-+ mov r11,r10
-+ lsl r11,r11,r6
++ lsl r11,r10,r6
+ mov r10,0
+ sub r7,r6 /* adjust exponent */
+ brle __avr32_f64_sub_subnormal_result
+ /* Put the number with the largest exponent in [r11, r10]
+ and the number with the smallest exponent in [r9, r8] */
+ cp r11, r9
-+ brhs 1f /* Skip swap if operands already correctly ordered*/
-+ /* Operands were not correctly ordered, swap them*/
++ brhs 1f /* Skip swap if operands already correctly ordered */
++ /* Operands were not correctly ordered, swap them */
+ mov r7, r11
+ mov r11, r9
+ mov r9, r7
+ mov r10, r8
+ mov r8, r7
+1:
++ mov lr, 0 /* Set sticky bits to zero */
+ /* Unpack largest operand - opH */
+ /* exp: r7 */
+ /* sf: r11, r10 */
-+ lsr r7, r11, 20 /* Extract exponent */
-+ cbr r7, 11 /* Clear sign bit */
-+ lsl r11, 11 /* Extract mantissa, leave room for implicit bit */
-+ or r11, r11, r10>>21
-+ lsl r10, 11
-+ sbr r11, 31 /* Insert implicit bit */
++ bfextu R7, R11, 20, 11 /* Extract exponent */
++ bfextu r11, r11, 0, 20 /* Extract mantissa */
++ sbr r11, 20 /* Insert implicit bit */
+
-+
+ /* Unpack smallest operand - opL */
+ /* exp: r6 */
+ /* sf: r9, r8 */
-+ lsr r6, r9, 20 /* Extract exponent */
-+ cbr r6, 11 /* Clear sign bit */
-+ breq __avr32_f64_add_opL_subnormal /* If either zero or subnormal */
-+ lsl r9, 11 /* Extract mantissa, leave room for implicit bit */
-+ or r9, r9, r8>>21
-+ lsl r8, 11
-+ sbr r9, 31 /* Insert implicit bit */
++ bfextu R6, R9, 20, 11 /* Extract exponent */
++ breq __avr32_f64_add_op2_subnormal
++ bfextu r9, r9, 0, 20 /* Extract mantissa */
++ sbr r9, 20 /* Insert implicit bit */
+
-+
-+__avr32_f64_add_opL_subnormal_done:
++2:
+ /* opH is NaN or Inf. */
+ cp.w r7, 0x7ff
+ breq __avr32_f64_add_opH_nan_or_inf
+ lsr r8,r8,r6 /* shift down lsw of mantissa */
+ lsr r9,r9,r6 /* shift down msw of mantissa */
+ or r8,r5 /* combine these bits with prepared lsw*/
-+#if defined(L_avr32_f64_addsub)
-+ cp.w lr,0 /* if any '1' bit in part we lost ...*/
-+ srne lr
-+ or r8, lr /* ... we need to set sticky bit*/
-+#endif
+
+__avr32_f64_add_shift_done:
+ /* Now add the mantissas. */
+ add r10, r8
+ adc r11, r11, r9
-+
++
+ /* Check if we overflowed. */
-+ brcs __avr32_f64_add_res_of
-+
++ bld r11, 21
++ breq __avr32_f64_add_res_of:
++
+__avr32_f64_add_res_of_done:
+
-+#if defined(L_avr32_f64_addsub)
-+ /* Insert the bits we will remove from the mantissa r9[31:21] */
-+ lsl r9, r10, (32 - 11)
-+#else
-+ /* Keep the last bit shifted out. */
-+ bfextu r9, r10, 10, 1
-+#endif
-+
+ /* Pack final result*/
+ /* Input: [r7]:exp, [r11, r10]:mant, [r12]:sign in MSB */
+ /* Result in [r11,r10] */
-+ /* Insert mantissa */
-+ cbr r11, 31 /*Clear implicit bit*/
-+ lsr r10, 11
-+ or r10, r10, r11<<21
-+ lsr r11, 11
+ /* Insert exponent and sign bit*/
-+ or r11, r11, r7<<20
-+ bld r12, 31
-+ bst r11, 31
++ bfins r11, r7, 20, 11
++ or r11, r12
+
+ /* Round */
+__avr32_f64_add_round:
+#if defined(L_avr32_f64_addsub)
-+ mov r7, 0
-+ sbr r7, 31
-+ bld r10, 0
-+ subne r7, -1
-+
-+ cp.w r9, r7
-+ srhs r9
-+#endif
-+ add r10, r9
-+ acr r11
-+
-+ /* Return result in [r11,r10] */
-+ ldm sp++, r5, r6, r7,pc
-+
-+
-+
-+__avr32_f64_add_opL_subnormal:
-+ /* Extract the of mantissa */
-+ lsl r9, 11 /* Extract mantissa, leave room for implicit bit */
-+ or r9, r9, r8>>21
-+ lsl r8, 11
-+
-+ /* Set exponent to 1 if we do not have a zero. */
-+ or lr, r9, r8
-+ movne r6,1
-+
-+ /* Check if opH is also subnormal. If so, clear implicit bit in r11*/
-+ rsub lr, r7, 0
-+ moveq r7,1
-+ bst r11, 31
-+
-+ /* Check if op1 is zero, if so set exponent to 0. */
-+ or lr, r11, r10
-+ moveq r7,0
-+
-+ rjmp __avr32_f64_add_opL_subnormal_done
++ bfextu r12, r10, 0, 1 /* Extract parity bit.*/
++ or lr, r12 /* or it together with the sticky bits. */
++ eorh lr, 0x8000 /* Toggle round bit. */
++ /* We should now round up by adding one for the following cases:
++
++ halfway sticky|parity round-up
++ 0 x no
++ 1 0 no
++ 1 1 yes
++
++ Since we have inverted the halfway bit we can use the satu instruction
++ by saturating to 1 bit to implement this.
++ */
++ satu lr >> 0, 1
++#else
++ lsr lr, 31
++#endif
++ add r10, lr
++ acr r11
++
++ /* Return result in [r11,r10] */
++ ldm sp++, r5, r6, r7,pc
+
++
+__avr32_f64_add_opH_nan_or_inf:
+ /* Check if opH is NaN, if so return NaN */
-+ cbr r11, 31
++ cbr r11, 20
+ or lr, r11, r10
+ brne __avr32_f64_add_return_nan
+
+ breq __avr32_f64_add_opL_nan_or_inf
+ ldm sp++, r5, r6, r7, pc/* opL not Inf or NaN, return opH */
+__avr32_f64_add_opL_nan_or_inf:
-+ cbr r9, 31
++ cbr r9, 20
+ or lr, r9, r8
+ brne __avr32_f64_add_return_nan
+ mov r10, 0 /* Generate Inf in r11, r10 */
-+ mov r11, 0
-+ orh r11, 0x7ff0
++ mov_imm r11, 0x7ff00000
+ ldm sp++, r5, r6, r7, pc/* opL Inf, return Inf */
+__avr32_f64_add_return_nan:
+ mov r10, -1 /* Generate NaN in r11, r10 */
+ /* Saturate the shift amount to 63. If the amount
+ is any larger op2 is insignificant. */
+ satu r6 >> 0, 6
-+#if defined(L_avr32_f64_addsub)
++ /* If shift amount is 32 there are no bits from the msw that are lost. */
++ moveq lr, r8
++ breq 0f
+ /* first remember whether part that is lost contains any 1 bits ... */
+ lsl lr,r9,r5 /* save all lost bits from msw */
++#if defined(L_avr32_f64_addsub)
++ cp.w r8, 0
++ srne r8
+ or lr,r8 /* also save lost bits (all) from lsw
+ now lr != 0 if we lose any bits */
+#endif
++0:
+ /* ... and now to the actual shift */
-+ mov r8,r9 /* msw -> lsw (i.e. "shift 32 first")*/
++ lsr r8,r9,r6 /* msw -> lsw and make rest of shift inside lsw*/
+ mov r9,0 /* clear msw */
-+ lsr r8,r8,r6 /* make rest of shift inside lsw*/
-+#if defined(L_avr32_f64_addsub)
-+ cp.w lr,0 /* if any '1' bit in part we lost ...*/
-+ sreq lr
-+ or r8, lr /* ... we need to set sticky bit*/
-+#endif
+ rjmp __avr32_f64_add_shift_done
+
+__avr32_f64_add_res_of:
-+ /* We overflowed. Increase exponent and shift mantissa.*/
-+ /* [r7]:exp, [r11, r10]:mant */
-+ ror r11
-+ lsr r10, 1
-+ sub r7, -1
++ /* We overflowed. Scale down mantissa by shifting right one position. */
++ or lr, lr, lr << 1 /* Remember stickybits*/
++ lsr r11, 1
++ ror r10
++ ror lr
++ sub r7, -1 /* Increment exponent */
+
+ /* Clear mantissa to set result to Inf if the exponent is 255. */
+ cp.w r7, 0x7ff
+ moveq r10, 0
+ moveq r11, 0
++ moveq lr, 0
+ rjmp __avr32_f64_add_res_of_done
+
-+
++__avr32_f64_add_op2_subnormal:
++ /* Set exponent to 1 */
++ mov r6, 1
++
++ /* Check if op2 is also subnormal. */
++ cp.w r7, 0
++ brne 2b
++
++ cbr r11, 20
++ /* Both operands are subnormal. Just add the mantissas
++ and the exponent will automatically be set to 1 if
++ we overflow into a normal number. */
++ add r10, r8
++ adc r11, r11, r9
++
++ /* Add sign bit */
++ or r11, r12
++
++ /* Return result in [r11,r10] */
++ ldm sp++, r5, r6, r7,pc
++
++
++
+#endif
+
+#ifdef L_avr32_f64_to_u32
+ brlt 0f
+ /*Return infinity */
+ mov r10, 0
-+ mov r11, 0
-+ orh r11, 0xffe0
++ mov_imm r11, 0xffe00000
+ rjmp __floatsidf_return_op1
+
+0:
+ If the result is zero, then the two values are both zero. */
+ or r12, r11
+ lsl r12, 1
-+ sreq r12
-+ ret r12
++ reteq 1
++ ret 0
+0:
+ /* Numbers were equal. Check for NaN or Inf */
-+ mov r11, 0
-+ orh r11, 0xff00
++ mov_imm r11, 0xff000000
+ lsl r12, 1
+ cp.w r12, r11
-+ srls r12 /* 0 if NaN, 1 otherwise */
-+ ret r12
++ retls 1 /* 0 if NaN, 1 otherwise */
++ ret 0
+#endif
+
+#if defined(L_avr32_f32_cmp_ge) || defined(L_avr32_f32_cmp_lt)
+#endif
+ lsl r10, r12, 1 /* Remove sign bits */
+ lsl r9, r11, 1
-+ mov r8, 0
-+ orh r8, 0xff00
++ subfeq r10, 0
++#ifdef L_avr32_f32_cmp_ge
++ reteq 1 /* Both numbers are zero. Return true. */
++#endif
++#ifdef L_avr32_f32_cmp_lt
++ reteq 0 /* Both numbers are zero. Return false. */
++#endif
++ mov_imm r8, 0xff000000
+ cp.w r10, r8
+ rethi 0 /* Op0 is NaN */
+ cp.w r9, r8
+ /* Both signs positive */
+ cp.w r12, r11
+#ifdef L_avr32_f32_cmp_ge
-+ srhs r12
++ reths 1
++ retlo 0
+#endif
+#ifdef L_avr32_f32_cmp_lt
-+ srlo r12
++ reths 0
++ retlo 1
+#endif
-+ retal r12
+0:
+ /* Both signs negative */
+ cp.w r11, r12
+#ifdef L_avr32_f32_cmp_ge
-+ srhs r12
++ reths 1
++ retlo 0
+#endif
+#ifdef L_avr32_f32_cmp_lt
-+ srlo r12
++ reths 0
++ retlo 1
+#endif
-+ retal r12
+#endif
+
+
+ or r11,r10 /* Check if all bits are zero */
+ or r11,r9
+ or r11,r8
-+ sreq r12 /* If all zeros the arguments are equal
++ reteq 1 /* If all zeros the arguments are equal
+ so return 1 else return 0 */
-+ ret r12
++ ret 0
+0:
+ /* check for NaN */
+ lsl r11,1
-+ mov r12, 0
-+ orh r12, 0xffe0
++ mov_imm r12, 0xffe00000
+ cp.w r10,0
+ cpc r11,r12 /* check if nan or inf */
-+ srls r12 /* If Arg is NaN return 0 else 1*/
-+ ret r12 /* Return */
++ retls 1 /* If Arg is NaN return 0 else 1*/
++ ret 0 /* Return */
+
+#endif
+
+#endif
+
+ /* compare magnitude of op1 and op2 */
-+ pushm lr
-+
+ lsl r11,1 /* Remove sign bit of op1 */
-+ srcs lr /* Sign op1 to lsb of lr*/
++ srcs r12 /* Sign op1 to lsb of r12*/
++ subfeq r10, 0
++ breq 3f /* op1 zero */
+ lsl r9,1 /* Remove sign bit of op2 */
-+ rol lr /* Sign op2 to lsb of lr, sign bit op1 bit 1 of lr*/
++ rol r12 /* Sign op2 to lsb of lr, sign bit op1 bit 1 of r12*/
++
+
+ /* Check for Nan */
-+ mov r12, 0
-+ orh r12, 0xffe0
++ pushm lr
++ mov_imm lr, 0xffe00000
+ cp.w r10,0
-+ cpc r11,r12
-+ movhi r12, 0 /* Return false for NaN */
++ cpc r11,lr
+ brhi 0f /* We have NaN */
+ cp.w r8,0
-+ cpc r9,r12
-+ movhi r12, 0 /* Return false for NaN */
++ cpc r9,lr
+ brhi 0f /* We have NaN */
++ popm lr
+
-+ cp.w lr,3 /* both operands negative ?*/
++ cp.w r12,3 /* both operands negative ?*/
+ breq 1f
+
-+ cp.w lr,1 /* both operands positive? */
++ cp.w r12,1 /* both operands positive? */
+ brlo 2f
+
+ /* Different signs. If sign of op1 is negative the difference
+ between op1 and op2 will always be negative, and if op1 is
+ positive the difference will always be positive */
+#ifdef L_avr32_f64_cmp_ge
-+ sreq r12
++ reteq 1
++ retne 0
+#endif
+#ifdef L_avr32_f64_cmp_lt
-+ srne r12
++ reteq 0
++ retne 1
+#endif
-+ popm pc
-+
-+
++
+2:
+ /* Both operands positive. Just compute the difference */
+ cp.w r10,r8
+ cpc r11,r9
+#ifdef L_avr32_f64_cmp_ge
-+ srhs r12
++ reths 1
++ retlo 0
+#endif
+#ifdef L_avr32_f64_cmp_lt
-+ srlo r12
++ reths 0
++ retlo 1
+#endif
-+ popm pc
+
+1:
+ /* Both operands negative. Compute the difference with operands switched */
+ cp r8,r10
+ cpc r9,r11
+#ifdef L_avr32_f64_cmp_ge
-+ srhs r12
++ reths 1
++ retlo 0
+#endif
+#ifdef L_avr32_f64_cmp_lt
-+ srlo r12
++ reths 0
++ retlo 1
+#endif
++
+0:
-+ popm pc
++ popm pc, r12=0
+#endif
+
-+
++3:
++ lsl r9,1 /* Remove sign bit of op1 */
++#ifdef L_avr32_f64_cmp_ge
++ srcs r12 /* If op2 is negative then op1 >= op2. */
++#endif
++#ifdef L_avr32_f64_cmp_lt
++ srcc r12 /* If op2 is positive then op1 <= op2. */
++#endif
++ subfeq r8, 0
++#ifdef L_avr32_f64_cmp_ge
++ reteq 1 /* Both operands are zero. Return true. */
++#endif
++#ifdef L_avr32_f64_cmp_lt
++ reteq 0 /* Both operands are zero. Return false. */
++#endif
++ ret r12
++
+
-+#ifdef L_avr32_f64_div
++#if defined(L_avr32_f64_div) || defined(L_avr32_f64_div_fast)
+ .align 2
++
++#if defined(L_avr32_f64_div_fast)
++ .global __avr32_f64_div_fast
++ .type __avr32_f64_div_fast,@function
++__avr32_f64_div_fast:
++#else
+ .global __avr32_f64_div
+ .type __avr32_f64_div,@function
-+
+__avr32_f64_div:
++#endif
+ stm --sp, r0, r1, r2, r3, r4, r5, r6, r7,lr
+ /* op1 in {r11,r10}*/
+ /* op2 in {r9,r8}*/
+ breq 11f /*If number is subnormal, normalize it */
+22:
+ cp r7, 0x7ff
-+ brhs 2f /* Check op1 for NaN or Inf */
++ brge 2f /* Check op1 for NaN or Inf */
+
+ /* Unpack op2 to 2.62 format*/
+ /* exp: r6 */
+ breq 13f /*If number is subnormal, normalize it */
+23:
+ cp r6, 0x7ff
-+ brhs 3f /* Check op2 for NaN or Inf */
++ brge 3f /* Check op2 for NaN or Inf */
+
+ /* Calculate new exponent */
+ sub r7, r6
+ /* New approximations : r3, r2 */
+ /* op1 = Dividend (2.62 format) : r11, r10 */
+
-+ mov r12, 1 /* Load TWO */
-+ brev r12
++ mov_imm r12, 0x80000000
+
+ /* Load initial guess, using look-up table */
+ /* Initial guess is of format 01.XY, where XY is constructed as follows: */
+ /* Multiply with dividend to get quotient */
+ mul_approx_df r3 /*ah*/, r2 /*al*/, r11 /*bh*/, r10 /*bl*/, r3 /*rh*/, r2 /*rl*/, r1 /*sh*/, r0 /*sl*/
+
-+ /* Shift by 3 to get result in 1.63 format, as required by the exponent. */
-+ /* Note that 1.63 format is already used by the exponent in r7, since */
-+ /* a bias of 1023 was added to the result exponent, even though the implicit */
-+ /* bit was inserted. This gives the exponent an additional bias of 1, which */
-+ /* supports 1.63 format. */
-+ lsl r3, r3, 3
-+ or r3, r3, r2>>29
-+ lsl r2, r2, 3
+
+ /* To increase speed, this result is not corrected before final rounding.*/
+ /* This may give a difference to IEEE compliant code of 1 ULP.*/
++
+
+ /* Adjust exponent and mantissa */
-+ /* r7:exp, [r3, r2]:mant, [r12,r11,r10]:scratch*/
++ /* r7:exp, [r3, r2]:mant, [r5, r4]:scratch*/
+ /* Mantissa may be of the format 0.xxxx or 1.xxxx. */
+ /* In the first case, shift one pos to left.*/
-+ sub r10, r7, 1
-+ mov r12, r3
-+ lsl r11, r2, 1
-+ rol r12
-+ bld r3, 31
-+ movne r7, r10
-+ movne r3, r12
-+ movne r2, r11
++ bld r3, 31-3
++ breq 0f
++ lsl r2, 1
++ rol r3
++ sub r7, 1
++#if defined(L_avr32_f64_div)
++ /* We must scale down the dividend to 5.59 format. */
++ lsr r10, 3
++ or r10, r10, r11 << 29
++ lsr r11, 3
++ rjmp 1f
++#endif
++0:
++#if defined(L_avr32_f64_div)
++ /* We must scale down the dividend to 6.58 format. */
++ lsr r10, 4
++ or r10, r10, r11 << 28
++ lsr r11, 4
++1:
++#endif
+ cp r7, 0
-+ breq 15f /*Result was subnormal. Flush-to-zero and return zero*/
-+
-+ /* Result was not subnormal. Perform rounding. */
-+ /* Note that the tie case (for round-to-even) can not occur in division. */
-+ /* [r7]:exp, [r3, r2]:mant */
-+ /* Mantissa is in 0.64 format. Round by adding 1<<(64-(52+2))=1<<10*/
-+ mov r12, (1<<10)
-+ add r2, r12
-+ acr r3
-+ /* Adjust exponent if we overflowed.*/
-+ subcs r7, -1
-+
-+
-+
-+ /* Pack final result*/
-+ /* Input: [r7]:exp, [r3, r2]:mant */
-+ /* Result in [r11,r10] */
-+ /* Insert exponent and sign bit*/
-+ lsl r11, r7, 20
++ brle __avr32_f64_div_res_subnormal /* Result was subnormal. */
++
++
++#if defined(L_avr32_f64_div)
++ /* In order to round correctly we calculate the remainder:
++ Remainder = dividend[r11:r10] - divisor[r9:r8]*quotient[r3:r2]
++ for the case when the quotient is halfway between the round-up
++ value and the round down value. If the remainder then is negative
++ it means that the quotient was too big and that it should not be
++ rounded up, if the remainder is positive the quotient was too small
++ and we need to round up. If the remainder is zero it means that the
++ quotient is exact but since we need to remove the guard bit we should
++ round to even. */
++
++ /* Truncate and add guard bit. */
++ andl r2, 0xff00
++ orl r2, 0x0080
++
++
++ /* Now do the multiplication. The quotient has the format 4.60
++ while the divisor has the format 2.62 which gives a result
++ of 6.58 */
++ mulu.d r0, r3, r8
++ macu.d r0, r2, r9
++ mulu.d r4, r2, r8
++ mulu.d r8, r3, r9
++ add r5, r0
++ adc r8, r8, r1
++ acr r9
++
++
++ /* Check if remainder is positive, negative or equal. */
++ bfextu r12, r2, 8, 1 /* Get parity bit into bit 0 of r12 */
++ cp r4, 0
++ cpc r5
++__avr32_f64_div_round_subnormal:
++ cpc r8, r10
++ cpc r9, r11
++ srlo r6 /* Remainder positive: we need to round up.*/
++ moveq r6, r12 /* Remainder zero: round up if mantissa odd. */
++#else
++ bfextu r6, r2, 7, 1 /* Get guard bit */
++#endif
++ /* Final packing, scale down mantissa. */
++ lsr r10, r2, 8
++ or r10, r10, r3<<24
++ lsr r11, r3, 8
++ /* Insert exponent and sign bit*/
++ bfins r11, r7, 20, 11
+ bld lr, 31
+ bst r11, 31
-+ /* Insert mantissa */
-+ cbr r3, 31 /*Clear implicit bit*/
-+ or r11, r11, r3>>11
-+ lsr r10, r2, 11
-+ or r10, r10, r3<<21
++
++ /* Final rounding */
++ add r10, r6
++ acr r11
++
+ /* Return result in [r11,r10] */
+ ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
+
+ rjmp 15f /* Op2 was inf, return zero*/
+
+11: /* Op1 was denormal. Fix it. */
-+ lsl r11, 2
-+ or r11, r11, r10 >> 30
-+ lsl r10, 2
-+ cbr r11, 31
++ lsl r11, 3
++ or r11, r11, r10 >> 29
++ lsl r10, 3
+ /* Check if op1 is zero. */
+ or r4, r10, r11
+ breq __avr32_f64_div_op1_zero
+
+
+13: /* Op2 was denormal. Fix it */
-+ lsl r9, 2
-+ or r9, r9, r8 >> 30
-+ lsl r8, 2
-+ cbr r9, 31
++ lsl r9, 3
++ or r9, r9, r8 >> 29
++ lsl r8, 3
+ /* Check if op2 is zero. */
+ or r4, r9, r8
+ breq 17f /* Divisor is zero -> return Inf */
+ rjmp 23b
+
+
-+15: /* Divide result was subnormal. Return zero. */
++__avr32_f64_div_res_subnormal:/* Divide result was subnormal. */
++#if defined(L_avr32_f64_div)
++ /* Check how much we must scale down the mantissa. */
++ neg r7
++ sub r7, -1 /* We no longer have an implicit bit. */
++ satu r7 >> 0, 6 /* Saturate shift amount to max 63. */
++ cp.w r7, 32
++ brge 0f
++ /* Shift amount <32 */
++ /* Scale down quotient */
++ rsub r6, r7, 32
++ lsr r2, r2, r7
++ lsl r12, r3, r6
++ or r2, r12
++ lsr r3, r3, r7
++ /* Scale down the dividend to match the scaling of the quotient. */
++ lsl r1, r10, r6
++ lsr r10, r10, r7
++ lsl r12, r11, r6
++ or r10, r12
++ lsr r11, r11, r7
++ mov r0, 0
++ rjmp 1f
++0:
++ /* Shift amount >=32 */
++ rsub r6, r7, 32
++ moveq r0, 0
++ moveq r12, 0
++ breq 0f
++ lsl r0, r10, r6
++ lsl r12, r11, r6
++0:
++ lsr r2, r3, r7
++ mov r3, 0
++ /* Scale down the dividend to match the scaling of the quotient. */
++ lsr r1, r10, r7
++ or r1, r12
++ lsr r10, r11, r7
++ mov r11, 0
++1:
++ /* Start performing the same rounding as done for normal numbers
++ but this time we have scaled the quotient and dividend and hence
++ need a little different comparison. */
++ /* Truncate and add guard bit. */
++ andl r2, 0xff00
++ orl r2, 0x0080
++
++ /* Now do the multiplication. */
++ mulu.d r6, r3, r8
++ macu.d r6, r2, r9
++ mulu.d r4, r2, r8
++ mulu.d r8, r3, r9
++ add r5, r6
++ adc r8, r8, r7
++ acr r9
++
++ /* Set exponent to 0 */
++ mov r7, 0
++
++ /* Check if remainder is positive, negative or equal. */
++ bfextu r12, r2, 8, 1 /* Get parity bit into bit 0 of r12 */
++ cp r4, r0
++ cpc r5, r1
++ /* Now the rest of the rounding is the same as for normals. */
++ rjmp __avr32_f64_div_round_subnormal
++
++#endif
++15:
++ /* Flush to zero for the fast version. */
+ mov r11, lr /*Get correct sign*/
+ andh r11, 0x8000, COH
+ mov r10, 0
+ ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
-+
++
+16: /* Return NaN. */
+ mov r11, -1
+ mov r10, -1
+ breq __avr32_f32_sub_op2_subnormal
+0:
+ /* Get shift amount to scale mantissa of op2. */
-+ rsub r9, r10
++ sub r12, r10, r9
++
++ breq __avr32_f32_sub_shift_done
+
+ /* Saturate the shift amount to 31. If the amount
+ is any larger op2 is insignificant. */
-+ satu r9 >> 0, 5
-+
++ satu r12 >> 0, 5
++
++ /* Put the remaining bits into r9.*/
++ rsub r9, r12, 32
++ lsl r9, r11, r9
++
++ /* If the remaining bits are non-zero then we must subtract one
++ more from opL. */
++ subne r8, 1
++ srne r9 /* LSB of r9 represents sticky bits. */
++
+ /* Shift mantissa of op2 to same decimal point as the mantissa
+ of op1. */
-+ lsr r12, r11, r9
-+
-+ /* Put the remainding bits into r11[23:..].*/
-+ rsub r9, r9, (32-8)
-+ lsl r11, r11, r9
++ lsr r11, r11, r12
+
++
++__avr32_f32_sub_shift_done:
+ /* Now subtract the mantissas. */
-+ sub r8, r12
++ sub r8, r11
+
+ ld.w r12, sp++
+
+ /* Normalize resulting mantissa. */
-+ clz r9, r8
-+ lsl r8, r8, r9
-+ sub r10, r9
++ clz r11, r8
++
++ retcs 0
++ lsl r8, r8, r11
++ sub r10, r11
+ brle __avr32_f32_sub_subnormal_result
-+
-+ /* Insert the bits we will remove from the mantissa into r11[31:24] */
-+ bfins r11, r8, 24, 8
++
++ /* Insert the bits we will remove from the mantissa into r9[31:24] */
++ or r9, r9, r8 << 24
+#else
+ /* Ignore sticky bit to simplify and speed up rounding */
+ /* op2 is either zero or subnormal. */
+
+ /* Normalize resulting mantissa. */
+ clz r9, r8
++ retcs 0
+ lsl r8, r8, r9
+ sub r10, r9
+ brle __avr32_f32_sub_subnormal_result
+ /* Round */
+__avr32_f32_sub_round:
+#if defined(L_avr32_f32_addsub)
-+ mov r10, 0
-+ sbr r10, 31
++ mov_imm r10, 0x80000000
+ bld r12, 0
+ subne r10, -1
-+ cp.w r11, r10
++ cp.w r9, r10
+ subhs r12, -1
+#else
+ bld r8, 7
+ /* Check if the number is so small that
+ it will be represented with zero. */
+ rsub r10, r10, 9
-+ rsub r9, r10, 32
++ rsub r11, r10, 32
+ retcs 0
+
+ /* Shift the mantissa into the correct position.*/
+ lsr r10, r8, r10
+ /* Add sign bit. */
-+ or r10, r12
++ or r12, r10
+
+ /* Put the shifted out bits in the most significant part
+ of r8. */
-+ lsl r8, r8, r9
++ lsl r8, r8, r11
+
+#if defined(L_avr32_f32_addsub)
-+ /* Add all the remainder bits used for rounding into r11 */
-+ andh r11, 0x00FF
-+ or r11, r8
++ /* Add all the remainder bits used for rounding into r9 */
++ or r9, r8
+#else
+ lsr r8, 24
+#endif
+ lsr r9, 23
+
+#if defined(L_avr32_f32_addsub)
-+ /* Keep sticky bit for correct IEEE rounding */
-+ st.w --sp, r12
-+
+ /* op2 is either zero or subnormal. */
+ breq __avr32_f32_add_op2_subnormal
+0:
++ /* Keep sticky bit for correct IEEE rounding */
++ st.w --sp, r12
++
+ /* Get shift amount to scale mantissa of op2. */
+ rsub r9, r10
+
+
+ /* Round */
+#if defined(L_avr32_f32_addsub)
-+ mov r10, 0
-+ sbr r10, 31
++ mov_imm r10, 0x80000000
+ bld r12, 0
+ subne r10, -1
+ cp.w r11, r10
+ /* Check if op1 is also subnormal. */
+ cp.w r10, 0
+ brne 0b
-+
++ /* Both operands subnormal, just add the mantissas and
++ pack. If the addition of the subnormal numbers results
++ in a normal number then the exponent will automatically
++ be set to 1 by the addition. */
+ cbr r8, 31
-+ /* If op1 is not zero set exponent to 1. */
-+ movne r10,1
-+
-+ rjmp 0b
++ add r11, r8
++ or r12, r12, r11 >> 8
++ ret r12
+
+__avr32_f32_add_op1_nan_or_inf:
+ /* Check if op1 is NaN, if so return NaN */
+
+
+#endif
-+
-+
++
++
+#if defined(L_avr32_f32_div) || defined(L_avr32_f32_div_fast)
+ .align 2
+
+ /* Initial guess : r11 */
+ /* New approximations : r11 */
+ /* Dividend : r12 */
-+
-+ mov r10, 1 /* Load TWO */
-+ brev r10
++
++ /* Load TWO */
++ mov_imm r10, 0x80000000
+
+ lsr r12, 2 /* Get significand of Op1 in 2.30 format */
+ lsr r5, r11, 2 /* Get significand of Op2 (=d) in 2.30 format */
+ /* a bias of 127 was added to the result exponent, even though the implicit */
+ /* bit was inserted. This gives the exponent an additional bias of 1, which */
+ /* supports 1.31 format. */
-+ lsl r10, r7, 3
-+
++ //lsl r10, r7, 3
++
++ /* Adjust exponent and mantissa in case the result is of format
++ 0000.1xxx to 0001.xxx*/
+#if defined(L_avr32_f32_div)
-+ /* To perform correct rounding, check for nonzero remainder, */
-+ /* and set LSB in quot if remainder != 0 */
-+ /* Remainder = dividend(r12) - divisor(r5)*quotient(r10) */
-+
-+ lsl r5, 1 /* Transform divisor from 2.30 to 1.31 format */
-+ /* Mask all bits lower than guard in quotient. */
-+ /* These bits are inexact due to approximative algorithm */
-+ andl r10, 0xffc0
-+ /* Add 1 in least significant bit pos to make sure approximation is from above */
-+ sub r10, -64
-+ mulu.d r6, r5, r10
-+ /* If remainder < 0, truncated quotient is too large, so the */
-+ /* delta added must be subtracted to get the correct truncated quotient. */
-+ sub r12, r7 /* Calculate remainder and implicitly set flags */
-+ sublt r10, 64
++ lsr r12, 4 /* Scale dividend to 6.26 format to match the
++ result of the multiplication of the divisor and
++ quotient to get the remainder. */
+#endif
-+
++ bld r7, 31-3
++ breq 0f
++ lsl r7, 1
++ sub r9, 1
++#if defined(L_avr32_f32_div)
++ lsl r12, 1 /* Scale dividend to 5.27 format to match the
++ result of the multiplication of the divisor and
++ quotient to get the remainder. */
++#endif
++0:
++ cp r9, 0
++ brle __avr32_f32_div_res_subnormal /* Result was subnormal. */
++
++
++#if defined(L_avr32_f32_div)
++ /* In order to round correctly we calculate the remainder:
++ Remainder = dividend[r12] - divisor[r5]*quotient[r7]
++ for the case when the quotient is halfway between the round-up
++ value and the round down value. If the remainder then is negative
++ it means that the quotient was too big and that it should not be
++ rounded up, if the remainder is positive the quotient was too small
++ and we need to round up. If the remainder is zero it means that the
++ quotient is exact but since we need to remove the guard bit we should
++ round to even. */
++ andl r7, 0xffe0
++ orl r7, 0x0010
++
++ /* Now do the multiplication. The quotient has the format 4.28
++ while the divisor has the format 2.30 which gives a result
++ of 6.26 */
++ mulu.d r10, r5, r7
++
++ /* Check if remainder is positive, negative or equal. */
++ bfextu r5, r7, 5, 1 /* Get parity bit into bit 0 of r5 */
++ cp r10, 0
++__avr32_f32_div_round_subnormal:
++ cpc r11, r12
++ srlo r11 /* Remainder positive: we need to round up.*/
++ moveq r11, r5 /* Remainder zero: round up if mantissa odd. */
++#else
++ bfextu r11, r7, 4, 1 /* Get guard bit */
++#endif
++
++ /* Pack final result*/
++ lsr r12, r7, 5
++ bfins r12, r9, 23, 8
+ /* For UC3, load with postincrement is faster than ldm */
+ ld.d r6, sp++
+ ld.w r5, sp++
-+
-+ /* Adjust exponent and mantissa */
-+ /* r9:exp, r10:mant, r11:scratch*/
-+ clz r11, r10
-+ sub r9, r11
-+ breq 16f /*Result was subnormal*/
-+ lsl r10, r10, r11
-+
-+ /* Result was not subnormal. Perform rounding. */
-+ /* Note that the tie case (for round-to-even) can not occur in division. */
-+ /* r9:exp, r10:mant*/
-+ sub r10, -1*(0x80)
-+ /* Adjust exponent if we overflowed. Note that we must use {cc}
-+ since we perform the add using a sub insn. */
-+ subcc r9, -1
-+
-+ /* Pack final result*/
-+ lsr r12, r10, 7
-+ bfins r12, r9, 24, 8
++ bld r8, 31
++ bst r12, 31
++ /* Rounding add. */
++ add r12, r11
++ ret r12
++
+__divsf_return_op1:
+ lsl r8, 1
+ ror r12
+ /* Op1 is NaN or inf */
+ retne -1 /* Return NaN if op1 is NaN */
+ /* Op1 is inf check op2 */
-+ mov r9, 0xff
-+ brev r9
++ mov_imm r9, 0xff000000
+ cp r11, r9
+ brlo __divsf_return_op1 /* inf/number gives inf */
+ ret -1 /* The rest gives NaN*/
+3:
-+ /* Op1 is NaN or inf */
++ /* Op2 is NaN or inf */
+ reteq 0 /* Return zero if number/inf*/
+ ret -1 /* Return NaN*/
+4:
+ tst r12,r12
+ reteq -1 /* 0.0/0.0 is NaN */
+ /* Nonzero/0.0 is Inf. Sign bit will be shifted in before returning*/
-+ mov r12, 0x7ff
-+ brev r12
++ mov_imm r12, 0xff000000
+ rjmp __divsf_return_op1
+
+11: /* Op1 was denormal. Fix it. */
+ rjmp 14b
+
+
-+16: /* Divide result was subnormal. Fix it and return. */
++__avr32_f32_div_res_subnormal: /* Divide result was subnormal */
+#if defined(L_avr32_f32_div)
-+ lsl r10, r10, r11 /*Perform shift required by adjustment of exponent and mantissa*/
-+ adjust_subnormal_sf r12 /*sf*/, r9 /*exp*/, r10 /*mant*/, r8 /*sign*/,r11 /*scratch*/
++ /* Check how much we must scale down the mantissa. */
++ neg r9
++ sub r9, -1 /* We no longer have an implicit bit. */
++ satu r9 >> 0, 5 /* Saturate shift amount to max 32. */
++ /* Scale down quotient */
++ rsub r10, r9, 32
++ lsr r7, r7, r9
++ /* Scale down the dividend to match the scaling of the quotient. */
++ lsl r6, r12, r10 /* Make the dividend 64-bit and put the lsw in r6 */
++ lsr r12, r12, r9
++
++ /* Start performing the same rounding as done for normal numbers
++ but this time we have scaled the quotient and dividend and hence
++ need a little different comparison. */
++ andl r7, 0xffe0
++ orl r7, 0x0010
++
++ /* Now do the multiplication. The quotient has the format 4.28
++ while the divisor has the format 2.30 which gives a result
++ of 6.26 */
++ mulu.d r10, r5, r7
++
++ /* Set exponent to 0 */
++ mov r9, 0
++
++ /* Check if remainder is positive, negative or equal. */
++ bfextu r5, r7, 5, 1 /* Get parity bit into bit 0 of r5 */
++ cp r10, r6
++ rjmp __avr32_f32_div_round_subnormal
++
+#else
++ ld.d r6, sp++
++ ld.w r5, sp++
+ /*Flush to zero*/
-+ mov r12, 0
++ ret 0
+#endif
-+ ret r12
-+
+#endif
+
+#ifdef L_avr32_f32_mul
+
+ /* Round */
+__avr32_f32_mul_round:
-+ mov r8, 0
-+ sbr r8, 31
++ mov_imm r8, 0x80000000
+ bld r12, 0
+ subne r8, -1
+
+
+0:
+ /* Inf or NaN*/
-+ mov r10, 0
-+ orh r10, 0xffe0
++ mov_imm r10, 0xffe00000
+ lsl r11,8 /* check mantissa */
+ movne r11, -1 /* Return NaN */
+ moveq r11, r10 /* Return inf */
+ /* NaN or inf */
+ cbr r12,31 /* clear implicit bit */
+ retne -1 /* Return NaN if mantissa not zero */
-+ mov r12, 0
-+ orh r12, 0xff00
++ mov_imm r12, 0xff000000
+ ret r12 /* Return inf */
+
+3: /* Result is subnormal. Adjust it.*/
+
+#endif
+
-\ No newline at end of file
-Index: gcc-4.2.3/gcc/config/avr32/lib2funcs.S
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/lib2funcs.S 2008-05-21 13:45:54.173288052 +0200
++#if defined(L_mulsi3) && (__AVR32_UC__ == 3)
++ .global __mulsi3
++ .type __mulsi3,@function
++
++__mulsi3:
++ mov r9, 0
++0:
++ lsr r11, 1
++ addcs r9, r9, r12
++ breq 1f
++ lsl r12, 1
++ rjmp 0b
++1:
++ ret r9
++#endif
+--- /dev/null
++++ b/gcc/config/avr32/lib2funcs.S
@@ -0,0 +1,21 @@
+ .align 4
+ .global __nonlocal_goto
+
+
+
-Index: gcc-4.2.3/gcc/config/avr32/linux-elf.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/linux-elf.h 2008-05-21 13:45:54.173288052 +0200
-@@ -0,0 +1,156 @@
+--- /dev/null
++++ b/gcc/config/avr32/linux-elf.h
+@@ -0,0 +1,151 @@
+/*
+ Linux/Elf specific definitions.
+ Copyright 2003-2006 Atmel Corporation.
+ "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
+
+#undef ASM_SPEC
-+#define ASM_SPEC "%{!mno-pic:--pic} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{mcpu=*:-mcpu=%*}"
-+
++#define ASM_SPEC "%{!mno-pic:%{!fno-pic:--pic}} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{mcpu=*:-mcpu=%*}"
++
+#undef LINK_SPEC
+#define LINK_SPEC "%{version:-v} \
+ %{static:-Bstatic} \
+ builtin_define ("__AVR32_HAS_BRANCH_PRED__"); \
+ if (TARGET_FAST_FLOAT) \
+ builtin_define ("__AVR32_FAST_FLOAT__"); \
-+ if (flag_pic) \
-+ { \
-+ builtin_define ("__PIC__"); \
-+ builtin_define ("__pic__"); \
-+ } \
+ } \
+ while (0)
+
+
+#define LINK_GCC_C_SEQUENCE_SPEC \
+ "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}"
-Index: gcc-4.2.3/gcc/config/avr32/predicates.md
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/predicates.md 2008-05-21 13:45:54.173288052 +0200
-@@ -0,0 +1,331 @@
+--- /dev/null
++++ b/gcc/config/avr32/predicates.md
+@@ -0,0 +1,419 @@
+;; AVR32 predicates file.
+;; Copyright 2003-2006 Atmel Corporation.
+;;
+ (match_test "register_operand(XEXP(op, 0), GET_MODE(XEXP(op, 0)))"))
+ (and (match_test "GET_CODE(XEXP(op, 0)) == CONST_INT")
+ (match_test "register_operand(XEXP(op, 1), GET_MODE(XEXP(op, 1)))"))))
-+ {
-+ return 1;
-+ }
+ )
+
+
+(define_predicate "avr32_logical_insn_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "avr32_logical_shift_operand"))
-+ {
-+ return 1;
-+ }
+)
+
+
+ (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
+ || (XINT(op, 1) == UNSPEC_COND_PL)"))))
+
++(define_predicate "avr32_cond3_comparison_operator"
++ (ior (match_code "eq, ne, ge, lt, geu, ltu")
++ (and (match_code "unspec")
++ (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
++ || (XINT(op, 1) == UNSPEC_COND_PL)"))))
++
+;; True for avr32 comparison operand
+(define_predicate "avr32_comparison_operand"
+ (ior (and (match_code "eq, ne, gt, ge, lt, le, gtu, geu, ltu, leu")
-+ (match_test "(rtx_equal_p (XEXP(op,0), cc0_rtx) && rtx_equal_p (XEXP(op,1), const0_rtx))"))
++ (match_test "(CC0_P (XEXP(op,0)) && rtx_equal_p (XEXP(op,1), const0_rtx))"))
+ (and (match_code "unspec")
+ (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
+ || (XINT(op, 1) == UNSPEC_COND_PL)"))))
+ })
+
+
++;; Immediate all the low 16-bits cleared
++(define_predicate "avr32_hi16_immediate_operand"
++ (match_code "const_int")
++ {
++ /* If the low 16-bits are zero then this
++ is a hi16 immediate. */
++ return ((INTVAL(op) & 0xffff) == 0);
++ }
++)
++
+;; True if this is a register or immediate operand
+(define_predicate "register_immediate_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "immediate_operand")))
+
++;; True if this is a register or const_int operand
++(define_predicate "register_const_int_operand"
++ (ior (match_operand 0 "register_operand")
++ (and (match_operand 0 "const_int_operand")
++ (match_operand 0 "immediate_operand"))))
++
++;; True if this is a register or const_double operand
++(define_predicate "register_const_double_operand"
++ (ior (match_operand 0 "register_operand")
++ (match_operand 0 "const_double_operand")))
+
+;; True is this is an operand containing a label_ref
+(define_predicate "avr32_label_ref_operand"
+
+;; True is this is valid avr32 symbol operand
+(define_predicate "avr32_symbol_operand"
-+ (ior (match_code "label_ref, symbol_ref")
-+ (and (match_code "const")
-+ (match_test "avr32_find_symbol(op)"))))
++ (and (match_code "label_ref, symbol_ref, const")
++ (match_test "avr32_find_symbol(op)")))
+
+;; True is this is valid operand for the lda.w and call pseudo insns
+(define_predicate "avr32_address_operand"
-+ (and (match_code "label_ref, symbol_ref")
++ (and (and (match_code "label_ref, symbol_ref")
++ (match_test "avr32_find_symbol(op)"))
+ (ior (match_test "TARGET_HAS_ASM_ADDR_PSEUDOS")
+ (match_test "flag_pic")) ))
+
++;; An immediate k16 address operand
++(define_predicate "avr32_ks16_address_operand"
++ (and (match_operand 0 "address_operand")
++ (ior (match_test "REG_P(op)")
++ (match_test "GET_CODE(op) == PLUS
++ && ((GET_CODE(XEXP(op,0)) == CONST_INT)
++ || (GET_CODE(XEXP(op,1)) == CONST_INT))")) ))
++
++;; An offset k16 memory operand
++(define_predicate "avr32_ks16_memory_operand"
++ (and (match_code "mem")
++ (match_test "avr32_ks16_address_operand (XEXP (op, 0), GET_MODE (XEXP (op, 0)))")))
++
++;; An immediate k11 address operand
++(define_predicate "avr32_ks11_address_operand"
++ (and (match_operand 0 "address_operand")
++ (ior (match_test "REG_P(op)")
++ (match_test "GET_CODE(op) == PLUS
++ && (((GET_CODE(XEXP(op,0)) == CONST_INT)
++ && avr32_const_ok_for_constraint_p(INTVAL(XEXP(op,0)), 'K', \"Ks11\"))
++ || ((GET_CODE(XEXP(op,1)) == CONST_INT)
++ && avr32_const_ok_for_constraint_p(INTVAL(XEXP(op,1)), 'K', \"Ks11\")))")) ))
++
+;; True if this is a avr32 call operand
+(define_predicate "avr32_call_operand"
+ (ior (ior (match_operand 0 "register_operand")
+(define_predicate "avr32_cond_immediate_operand"
+ (and (match_operand 0 "immediate_operand")
+ (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'I', \"Is08\")")))
-Index: gcc-4.2.3/gcc/config/avr32/simd.md
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/simd.md 2008-05-21 13:45:54.173288052 +0200
++
++
++(define_predicate "avr32_cond_move_operand"
++ (ior (ior (match_operand 0 "register_operand")
++ (and (match_operand 0 "immediate_operand")
++ (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")")))
++ (and (match_test "TARGET_V2_INSNS")
++ (match_operand 0 "memory_operand"))))
++
++(define_predicate "avr32_mov_immediate_operand"
++ (and (match_operand 0 "immediate_operand")
++ (match_test "avr32_const_ok_for_move(INTVAL(op))")))
++
++
++(define_predicate "avr32_rmw_address_operand"
++ (ior (and (match_code "symbol_ref")
++ (match_test "({rtx symbol = avr32_find_symbol(op); \
++ symbol && (GET_CODE (symbol) == SYMBOL_REF) && SYMBOL_REF_RMW_ADDR(symbol);})"))
++ (and (match_operand 0 "immediate_operand")
++ (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks17\")")))
++ {
++ return TARGET_RMW && !flag_pic;
++ }
++)
++
++(define_predicate "avr32_rmw_memory_operand"
++ (and (match_code "mem")
++ (match_test "(GET_MODE(op) == SImode) &&
++ avr32_rmw_address_operand(XEXP(op, 0), GET_MODE(XEXP(op, 0)))")))
++
++(define_predicate "avr32_rmw_memory_or_register_operand"
++ (ior (match_operand 0 "avr32_rmw_memory_operand")
++ (match_operand 0 "register_operand")))
++
++(define_predicate "avr32_non_rmw_memory_operand"
++ (and (not (match_operand 0 "avr32_rmw_memory_operand"))
++ (match_operand 0 "memory_operand")))
++
++(define_predicate "avr32_non_rmw_general_operand"
++ (and (not (match_operand 0 "avr32_rmw_memory_operand"))
++ (match_operand 0 "general_operand")))
++
++(define_predicate "avr32_non_rmw_nonimmediate_operand"
++ (and (not (match_operand 0 "avr32_rmw_memory_operand"))
++ (match_operand 0 "nonimmediate_operand")))
+--- /dev/null
++++ b/gcc/config/avr32/simd.md
@@ -0,0 +1,145 @@
+;; AVR32 machine description file for SIMD instructions.
+;; Copyright 2003-2006 Atmel Corporation.
+ "psubadd.h\t%0, %1:b, %2:b"
+ [(set_attr "length" "4")
+ (set_attr "type" "alu")])
-Index: gcc-4.2.3/gcc/config/avr32/sync.md
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/sync.md 2008-05-21 13:45:54.177287723 +0200
-@@ -0,0 +1,175 @@
+--- /dev/null
++++ b/gcc/config/avr32/sync.md
+@@ -0,0 +1,244 @@
+;;=================================================================
+;; Atomic operations
+;;=================================================================
+(define_code_attr atomic_insn [(plus "add") (minus "sub") (and "and") (ior "ior") (xor "xor")])
+
+(define_insn "sync_loadsi"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
++ ; NB! Put an early clobber on the destination operand to
++ ; avoid gcc using the same register in the source and
++ ; destination. This is done in order to prevent gcc
++ ; from clobbering the source operand since these instructions
++ ; are actually inside a "loop".
++ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (unspec_volatile:SI
-+ [(match_operand:SI 1 "memory_operand" "RKs16")
++ [(match_operand:SI 1 "avr32_ks16_memory_operand" "RKs16")
+ (label_ref (match_operand 2 "" ""))]
+ VUNSPEC_SYNC_SET_LOCK_AND_LOAD) )]
+ ""
+ )
+
+(define_insn "sync_store_if_lock"
-+ [(set (match_operand:SI 0 "memory_operand" "=RKs16")
++ [(set (match_operand:SI 0 "avr32_ks16_memory_operand" "=RKs16")
+ (unspec_volatile:SI
+ [(match_operand:SI 1 "register_operand" "r")
+ (label_ref (match_operand 2 "" ""))]
+(define_expand "sync_<atomic_insn>si"
+ [(set (match_dup 2)
+ (unspec_volatile:SI
-+ [(match_operand:SI 0 "memory_operand" "")
++ [(match_operand:SI 0 "avr32_ks16_memory_operand" "")
+ (match_dup 3)]
+ VUNSPEC_SYNC_SET_LOCK_AND_LOAD))
+ (set (match_dup 2)
+ (unspec_volatile:SI
+ [(match_dup 2)
+ (match_dup 3)]
-+ VUNSPEC_SYNC_STORE_IF_LOCK) )]
++ VUNSPEC_SYNC_STORE_IF_LOCK) )
++ (use (match_dup 1))
++ (use (match_dup 4))]
+ ""
+ {
++ rtx *mem_expr = &operands[0];
++ rtx ptr_reg;
++ if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) )
++ {
++ ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0));
++ XEXP (*mem_expr, 0) = ptr_reg;
++ }
++ else
++ {
++ rtx address = XEXP (*mem_expr, 0);
++ if ( REG_P (address) )
++ ptr_reg = address;
++ else if ( REG_P (XEXP (address, 0)) )
++ ptr_reg = XEXP (address, 0);
++ else
++ ptr_reg = XEXP (address, 1);
++ }
++
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ());
++ operands[4] = ptr_reg;
++
+ }
+ )
+
+(define_expand "sync_old_<atomic_insn>si"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (unspec_volatile:SI
-+ [(match_operand:SI 1 "memory_operand" "")
++ [(match_operand:SI 1 "avr32_ks16_memory_operand" "")
+ (match_dup 4)]
+ VUNSPEC_SYNC_SET_LOCK_AND_LOAD))
+ (set (match_dup 3)
+ (unspec_volatile:SI
+ [(match_dup 3)
+ (match_dup 4)]
-+ VUNSPEC_SYNC_STORE_IF_LOCK) )]
++ VUNSPEC_SYNC_STORE_IF_LOCK) )
++ (use (match_dup 2))
++ (use (match_dup 5))]
+ ""
+ {
++ rtx *mem_expr = &operands[1];
++ rtx ptr_reg;
++ if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) )
++ {
++ ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0));
++ XEXP (*mem_expr, 0) = ptr_reg;
++ }
++ else
++ {
++ rtx address = XEXP (*mem_expr, 0);
++ if ( REG_P (address) )
++ ptr_reg = address;
++ else if ( REG_P (XEXP (address, 0)) )
++ ptr_reg = XEXP (address, 0);
++ else
++ ptr_reg = XEXP (address, 1);
++ }
++
+ operands[3] = gen_reg_rtx (SImode);
+ operands[4] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ());
++ operands[5] = ptr_reg;
+ }
+ )
+
+(define_expand "sync_new_<atomic_insn>si"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (unspec_volatile:SI
-+ [(match_operand:SI 1 "memory_operand" "")
++ [(match_operand:SI 1 "avr32_ks16_memory_operand" "")
+ (match_dup 3)]
+ VUNSPEC_SYNC_SET_LOCK_AND_LOAD))
+ (set (match_dup 0)
+ (unspec_volatile:SI
+ [(match_dup 0)
+ (match_dup 3)]
-+ VUNSPEC_SYNC_STORE_IF_LOCK) )]
++ VUNSPEC_SYNC_STORE_IF_LOCK) )
++ (use (match_dup 2))
++ (use (match_dup 4))]
+ ""
+ {
++ rtx *mem_expr = &operands[1];
++ rtx ptr_reg;
++ if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) )
++ {
++ ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0));
++ XEXP (*mem_expr, 0) = ptr_reg;
++ }
++ else
++ {
++ rtx address = XEXP (*mem_expr, 0);
++ if ( REG_P (address) )
++ ptr_reg = address;
++ else if ( REG_P (XEXP (address, 0)) )
++ ptr_reg = XEXP (address, 0);
++ else
++ ptr_reg = XEXP (address, 1);
++ }
++
+ operands[3] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ());
++ operands[4] = ptr_reg;
+ }
+ )
+
+ "xchg\t%0, %p1, %2"
+ [(set_attr "length" "4")]
+ )
-Index: gcc-4.2.3/gcc/config/avr32/t-avr32
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/t-avr32 2008-05-21 13:45:54.177287723 +0200
-@@ -0,0 +1,76 @@
+--- /dev/null
++++ b/gcc/config/avr32/t-avr32
+@@ -0,0 +1,77 @@
+
+MD_INCLUDES= $(srcdir)/config/avr32/avr32.md \
+ $(srcdir)/config/avr32/sync.md \
+ $(srcdir)/config/avr32/fpcp.md \
+ $(srcdir)/config/avr32/simd.md \
-+ $(srcdir)/config/avr32/predicates.md
++ $(srcdir)/config/avr32/predicates.md
+
+s-config s-conditions s-flags s-codes s-constants s-emit s-recog s-preds \
+ s-opinit s-extract s-peep s-attr s-attrtab s-output: $(MD_INCLUDES)
+DPBIT = dp-bit.c
+
+LIB1ASMSRC = avr32/lib1funcs.S
-+LIB1ASMFUNCS = _avr32_f64_mul _avr32_f64_addsub _avr32_f64_addsub_fast _avr32_f64_to_u32 \
++LIB1ASMFUNCS = _avr32_f64_mul _avr32_f64_mul_fast _avr32_f64_addsub _avr32_f64_addsub_fast _avr32_f64_to_u32 \
+ _avr32_f64_to_s32 _avr32_f64_to_u64 _avr32_f64_to_s64 _avr32_u32_to_f64 \
+ _avr32_s32_to_f64 _avr32_f64_cmp_eq _avr32_f64_cmp_ge _avr32_f64_cmp_lt \
-+ _avr32_f32_cmp_eq _avr32_f32_cmp_ge _avr32_f32_cmp_lt _avr32_f64_div \
++ _avr32_f32_cmp_eq _avr32_f32_cmp_ge _avr32_f32_cmp_lt _avr32_f64_div _avr32_f64_div_fast \
+ _avr32_f32_div _avr32_f32_div_fast _avr32_f32_addsub _avr32_f32_addsub_fast \
+ _avr32_f32_mul _avr32_s32_to_f32 _avr32_u32_to_f32 _avr32_f32_to_s32 \
-+ _avr32_f32_to_u32 _avr32_f32_to_f64 _avr32_f64_to_f32
++ _avr32_f32_to_u32 _avr32_f32_to_f64 _avr32_f64_to_f32 _mulsi3
+
+#LIB2FUNCS_EXTRA += $(srcdir)/config/avr32/lib2funcs.S
+
-+MULTILIB_OPTIONS = march=ap/march=uc
-+MULTILIB_DIRNAMES = ap uc
++MULTILIB_OPTIONS = march=ap/march=ucr1/march=ucr2/march=ucr2nomul
++MULTILIB_DIRNAMES = ap ucr1 ucr2 ucr2nomul
+MULTILIB_EXCEPTIONS =
-+MULTILIB_MATCHES = march?ap=mcpu?ap7000
-+MULTILIB_MATCHES += march?ap=mcpu?ap7010
-+MULTILIB_MATCHES += march?ap=mcpu?ap7020
-+MULTILIB_MATCHES += march?uc=mcpu?uc3a0256
-+MULTILIB_MATCHES += march?uc=mcpu?uc3a0512
-+MULTILIB_MATCHES += march?uc=mcpu?uc3a1128
-+MULTILIB_MATCHES += march?uc=mcpu?uc3a1256
-+MULTILIB_MATCHES += march?uc=mcpu?uc3a1512
-+MULTILIB_MATCHES += march?uc=mcpu?uc3b064
-+MULTILIB_MATCHES += march?uc=mcpu?uc3b0128
-+MULTILIB_MATCHES += march?uc=mcpu?uc3b0256
-+MULTILIB_MATCHES += march?uc=mcpu?uc3b164
-+MULTILIB_MATCHES += march?uc=mcpu?uc3b1128
-+MULTILIB_MATCHES += march?uc=mcpu?uc3b1256
+MULTILIB_MATCHES += march?ap=mpart?ap7000
-+MULTILIB_MATCHES += march?ap=mpart?ap7010
-+MULTILIB_MATCHES += march?ap=mpart?ap7020
-+MULTILIB_MATCHES += march?uc=mpart?uc3a0256
-+MULTILIB_MATCHES += march?uc=mpart?uc3a0512
-+MULTILIB_MATCHES += march?uc=mpart?uc3a1128
-+MULTILIB_MATCHES += march?uc=mpart?uc3a1256
-+MULTILIB_MATCHES += march?uc=mpart?uc3a1512
-+MULTILIB_MATCHES += march?uc=mpart?uc3b064
-+MULTILIB_MATCHES += march?uc=mpart?uc3b0128
-+MULTILIB_MATCHES += march?uc=mpart?uc3b0256
-+MULTILIB_MATCHES += march?uc=mpart?uc3b164
-+MULTILIB_MATCHES += march?uc=mpart?uc3b1128
-+MULTILIB_MATCHES += march?uc=mpart?uc3b1256
++MULTILIB_MATCHES += march?ap=mpart?ap7001
++MULTILIB_MATCHES += march?ap=mpart?ap7002
++MULTILIB_MATCHES += march?ap=mpart?ap7200
++MULTILIB_MATCHES += march?ucr1=march?uc
++MULTILIB_MATCHES += march?ucr1=mpart?uc3a0512es
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a0128
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a0256
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a0512
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a1128
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a1256
++MULTILIB_MATCHES += march?ucr1=mpart?uc3a1512es
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a1512
++MULTILIB_MATCHES += march?ucr2nomul=mpart?uc3a3revd
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a364
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a364s
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128s
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256s
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b064
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b0128
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256es
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b164
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b1128
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256es
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256
++
+
+EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o crti.o crtn.o
+
+
+
+
-Index: gcc-4.2.3/gcc/config/avr32/t-elf
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/t-elf 2008-05-21 13:45:54.177287723 +0200
+--- /dev/null
++++ b/gcc/config/avr32/t-elf
@@ -0,0 +1,16 @@
+
+# Assemble startup files.
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
-Index: gcc-4.2.3/gcc/config/avr32/uclinux-elf.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/uclinux-elf.h 2008-05-21 13:45:54.177287723 +0200
+--- /dev/null
++++ b/gcc/config/avr32/uclinux-elf.h
@@ -0,0 +1,20 @@
+
+/* Run-time Target Specification. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (AVR32_FLAG_NO_INIT_GOT)
-Index: gcc-4.2.3/gcc/config/host-linux.c
-===================================================================
---- gcc-4.2.3.orig/gcc/config/host-linux.c 2007-09-01 17:28:30.000000000 +0200
-+++ gcc-4.2.3/gcc/config/host-linux.c 2008-05-21 13:45:54.177287723 +0200
+--- a/gcc/config/host-linux.c
++++ b/gcc/config/host-linux.c
@@ -25,6 +25,9 @@
#include "hosthooks.h"
#include "hosthooks-def.h"
/* Linux has a feature called exec-shield-randomize that perturbs the
address of non-fixed mapped segments by a (relatively) small amount.
-Index: gcc-4.2.3/gcc/config.gcc
-===================================================================
---- gcc-4.2.3.orig/gcc/config.gcc 2008-05-21 13:45:53.353287795 +0200
-+++ gcc-4.2.3/gcc/config.gcc 2008-05-21 13:45:54.181287953 +0200
-@@ -790,6 +790,24 @@
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -781,6 +781,24 @@ avr-*-*)
tm_file="avr/avr.h dbxelf.h"
use_fixproto=yes
;;
bfin*-elf*)
tm_file="${tm_file} dbxelf.h elfos.h bfin/elf.h"
tmake_file=bfin/t-bfin-elf
-@@ -1690,6 +1708,9 @@
+@@ -1681,6 +1699,9 @@ pdp11-*-bsd)
pdp11-*-*)
use_fixproto=yes
;;
# port not yet contributed
#powerpc-*-openbsd*)
# tmake_file="${tmake_file} rs6000/t-fprules rs6000/t-fprules-fpbit "
-@@ -2726,6 +2747,32 @@
+@@ -2717,6 +2738,32 @@ case "${target}" in
fi
;;
fr*-*-*linux*)
supported_defaults=cpu
case "$with_cpu" in
-Index: gcc-4.2.3/gcc/doc/extend.texi
-===================================================================
---- gcc-4.2.3.orig/gcc/doc/extend.texi 2008-02-01 02:40:49.000000000 +0100
-+++ gcc-4.2.3/gcc/doc/extend.texi 2008-05-21 13:45:54.222289824 +0200
-@@ -1981,7 +1981,7 @@
+--- a/gcc/doc/extend.texi
++++ b/gcc/doc/extend.texi
+@@ -1981,7 +1981,7 @@ this attribute to work correctly.
@item interrupt
@cindex interrupt handler functions
ports to indicate that the specified function is an interrupt handler.
The compiler will generate function entry and exit sequences suitable
for use in an interrupt handler when this attribute is present.
-@@ -2000,6 +2000,15 @@
+@@ -2000,6 +2000,15 @@ void f () __attribute__ ((interrupt ("IR
Permissible values for this parameter are: IRQ, FIQ, SWI, ABORT and UNDEF@.
@item interrupt_handler
@cindex interrupt handler functions on the Blackfin, m68k, H8/300 and SH processors
Use this attribute on the Blackfin, m68k, H8/300, H8/300H, H8S, and SH to
-@@ -6175,6 +6184,7 @@
+@@ -3460,6 +3469,23 @@ placed in either the @code{.bss_below100
+
+ @end table
+
++@subsection AVR32 Variable Attributes
++
++One attribute is currently defined for AVR32 configurations:
++@code{rmw_addressable}
++
++@table @code
++@item rmw_addressable
++@cindex @code{rmw_addressable} attribute
++
++This attribute can be used to signal that a variable can be accessed
++with the addressing mode of the AVR32 Atomic Read-Modify-Write memory
++instructions and hence make it possible for gcc to generate these
++instructions without using built-in functions or inline assembly statements.
++Variables used within the AVR32 Atomic Read-Modify-Write built-in
++functions will automatically get the @code{rmw_addressable} attribute.
++@end table
++
+ @node Type Attributes
+ @section Specifying Attributes of Types
+ @cindex attribute of types
+@@ -6167,6 +6193,7 @@ instructions, but allow the compiler to
@menu
* Alpha Built-in Functions::
* ARM Built-in Functions::
* Blackfin Built-in Functions::
* FR-V Built-in Functions::
* X86 Built-in Functions::
-@@ -6413,6 +6423,54 @@
+@@ -6405,6 +6432,76 @@ long long __builtin_arm_wxor (long long,
long long __builtin_arm_wzero ()
@end smallexample
+@subsection AVR32 Built-in Functions
+
+
++
++Built-in functions for atomic memory (RMW) instructions. Note that these
++built-ins will fail for targets where the RMW instructions are not
++implemented. Also note that these instructions only accept a Ks15 << 2
++memory address and will therefore not work with any runtime computed
++memory addresses. The user is responsible for making sure that any
++pointers used within these functions point to a valid memory address.
++
+@smallexample
++void __builtin_mems(int */*ptr*/, int /*bit*/)
++void __builtin_memc(int */*ptr*/, int /*bit*/)
++void __builtin_memt(int */*ptr*/, int /*bit*/)
++@end smallexample
++
++Built-in functions for DSP instructions. Note that these built-ins will
++fail for targets where the DSP instructions are not implemented.
+
++@smallexample
+int __builtin_sats (int /*Rd*/,int /*sa*/, int /*bn*/)
+int __builtin_satu (int /*Rd*/,int /*sa*/, int /*bn*/)
+int __builtin_satrnds (int /*Rd*/,int /*sa*/, int /*bn*/)
+long long __builtin_mulnwh_d(int, short)
+long long __builtin_macwh_d(long long, int, short)
+long long __builtin_machh_d(long long, short, short)
++@end smallexample
+
++Other built-in functions for instructions that cannot easily be
++generated by the compiler.
++
++@smallexample
++void __builtin_ssrf(int);
++void __builtin_csrf(int);
+void __builtin_musfr(int);
+int __builtin_mustr(void);
+int __builtin_mfsr(int /*Status Register Address*/)
+void __builtin_mvrc_w(int/*cpnr*/, int/*crd*/, int/*value*/)
+long long __builtin_mvcr_d(int/*cpnr*/, int/*crs*/)
+void __builtin_mvrc_d(int/*cpnr*/, int/*crd*/, long long/*value*/)
-+
+@end smallexample
+
@node Blackfin Built-in Functions
@subsection Blackfin Built-in Functions
-Index: gcc-4.2.3/gcc/doc/invoke.texi
-===================================================================
---- gcc-4.2.3.orig/gcc/doc/invoke.texi 2008-01-31 19:40:58.000000000 +0100
-+++ gcc-4.2.3/gcc/doc/invoke.texi 2008-05-21 13:45:54.274288067 +0200
-@@ -190,7 +190,7 @@
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -190,7 +190,7 @@ in the following sections.
-fno-default-inline -fvisibility-inlines-hidden @gol
-Wabi -Wctor-dtor-privacy @gol
-Wnon-virtual-dtor -Wreorder @gol
-Wno-non-template-friend -Wold-style-cast @gol
-Woverloaded-virtual -Wno-pmf-conversions @gol
-Wsign-promo}
-@@ -590,6 +590,12 @@
+@@ -588,6 +588,12 @@ Objective-C and Objective-C++ Dialects}.
-mauto-incdec -minmax -mlong-calls -mshort @gol
-msoft-reg-count=@var{count}}
+@emph{AVR32 Options}
+@gccoptlist{-muse-rodata-section -mhard-float -msoft-float -mrelax @gol
-+-mforce-double-align -mno-init-got -mpart=@var{part} -mcpu=@var{cpu} @gol
-+-march=@var{arch} -mfast-float -masm-addr-pseudos -mno-asm-addr-pseudos -mno-pic
-+}
++-mforce-double-align -mno-init-got -mrelax -mmd-reorg-opt -masm-addr-pseudos @gol
++-mpart=@var{part} -mcpu=@var{cpu} -march=@var{arch} @gol
++-mfast-float -mimm-in-const-pool}
+
@emph{MCore Options}
@gccoptlist{-mhardlit -mno-hardlit -mdiv -mno-div -mrelax-immediates @gol
-mno-relax-immediates -mwide-bitfields -mno-wide-bitfields @gol
-@@ -1869,14 +1875,6 @@
+@@ -1868,14 +1874,6 @@ to filter out those warnings.
@opindex Wno-deprecated
Do not warn about usage of deprecated features. @xref{Deprecated Features}.
@item -Wno-non-template-friend @r{(C++ only)}
@opindex Wno-non-template-friend
Disable warnings when non-templatized friend functions are declared
-@@ -2733,13 +2731,11 @@
+@@ -2732,13 +2730,11 @@ requiring @option{-O}.
If you want to warn about code which uses the uninitialized value of the
variable in its own initializer, use the @option{-Winit-self} option.
Note that there may be no warning about a variable that is used only
to compute a value that itself is never used, because such
-@@ -6228,10 +6224,6 @@
+@@ -6201,10 +6197,6 @@ If number of candidates in the set is sm
we always try to remove unnecessary ivs from the set during its
optimization when a new iv is added to the set.
@item vect-max-version-checks
The maximum number of runtime checks that can be performed when doing
loop versioning in the vectorizer. See option ftree-vect-loop-version
-@@ -7429,7 +7421,7 @@
+@@ -7402,7 +7394,7 @@ platform.
* ARC Options::
* ARM Options::
* AVR Options::
* CRIS Options::
* CRX Options::
* Darwin Options::
-@@ -7894,81 +7886,68 @@
+@@ -7867,81 +7859,80 @@ comply to the C standards, but it will p
size.
@end table
-@item -mno-low-64k
-@opindex mno-low-64k
-Assume that the program is arbitrarily large. This is the default.
-+@item -mrelax
-+@opindex mrelax
-+Enable relaxing in linker. This means that when the address of symbols
-+are known at link time, the linker can optimize @samp{icall} and @samp{mcall}
-+instructions into a @samp{rcall} instruction if possible. Loading the address
-+of a symbol can also be optimized.
-+
-+@item -mforce-double-align
-+@opindex mforce-double-align
-+Force double-word alignment for double-word memory accesses.
-+
-+@item -mno-init-got
-+@opindex mno-init-got
-+Do not initialize the GOT register before using it when compiling PIC
-+code.
++@item -mhard-float
++@opindex mhard-float
++Use floating point coprocessor instructions.
-@item -mid-shared-library
-@opindex mid-shared-library
-Generate code that supports shared libraries via the library ID method.
-This allows for execute in place and shared libraries in an environment
-without virtual memory management. This option implies @option{-fPIC}.
-+@item -mno-pic
-+@opindex mno-pic
-+Do not emit position-independent code (will break dynamic linking.)
++@item -msoft-float
++@opindex msoft-float
++Use software floating-point library for floating-point operations.
-@item -mno-id-shared-library
-@opindex mno-id-shared-library
-Generate code that doesn't assume ID based shared libraries are being used.
-This is the default.
++@item -mforce-double-align
++@opindex mforce-double-align
++Force double-word alignment for double-word memory accesses.
++
++@item -mno-init-got
++@opindex mno-init-got
++Do not initialize the GOT register before using it when compiling PIC
++code.
+
+-@item -mshared-library-id=n
+-@opindex mshared-library-id
+-Specified the identification number of the ID based shared library being
+-compiled. Specifying a value of 0 will generate more compact code, specifying
+-other values will force the allocation of that number to the current
+-library but is no more space or time efficient than omitting this option.
++@item -mrelax
++@opindex mrelax
++Let invoked assembler and linker do relaxing
++(Enabled by default when optimization level is >1).
++This means that when the address of symbols are known at link time,
++the linker can optimize @samp{icall} and @samp{mcall}
++instructions into a @samp{rcall} instruction if possible.
++Loading the address of a symbol can also be optimized.
++
++@item -mmd-reorg-opt
++@opindex mmd-reorg-opt
++Perform machine dependent optimizations in reorg stage.
++
+@item -masm-addr-pseudos
+@opindex masm-addr-pseudos
+Use assembler pseudo-instructions lda.w and call for handling direct
+addresses. (Enabled by default)
+
-+@item -mno-asm-addr-pseudos
-+@opindex mno-asm-addr-pseudos
-+Do not use assembler pseudo-instructions lda.w and call for handling direct addresses.
-+
+@item -mpart=@var{part}
+@opindex mpart
-+Generate code for the specified part. Permissible parts are: @samp{ap7000},
-+@samp{ap7010},@samp{ap7020},@samp{uc3a0256}, @samp{uc3a0512},
-+@samp{uc3a1128}, @samp{uc3a1256}, @samp{uc3a1512}.
++Generate code for the specified part. Permissible parts are:
++@samp{ap7000}, @samp{ap7010},@samp{ap7020},
++@samp{uc3a0128}, @samp{uc3a0256}, @samp{uc3a0512},
++@samp{uc3a1128}, @samp{uc3a1256}, @samp{uc3a1512},
++@samp{uc3b064}, @samp{uc3b0128}, @samp{uc3b0256},
++@samp{uc3b164}, @samp{uc3b1128}, @samp{uc3b1256}.
--@item -mshared-library-id=n
--@opindex mshared-library-id
--Specified the identification number of the ID based shared library being
--compiled. Specifying a value of 0 will generate more compact code, specifying
--other values will force the allocation of that number to the current
--library but is no more space or time efficient than omitting this option.
+-@item -mlong-calls
+-@itemx -mno-long-calls
+-@opindex mlong-calls
+-@opindex mno-long-calls
+-Tells the compiler to perform function calls by first loading the
+-address of the function into a register and then performing a subroutine
+-call on this register. This switch is needed if the target function
+-will lie outside of the 24 bit addressing range of the offset based
+-version of subroutine call instruction.
+@item -mcpu=@var{cpu-type}
+@opindex mcpu
+Same as -mpart. Obsolete.
+Enable fast floating-point library that does not conform to ieee but is still good enough
+for most applications. The fast floating-point library does not round to the nearest even
+but away from zero. Enabled by default if the -funsafe-math-optimizations switch is specified.
-
--@item -mlong-calls
--@itemx -mno-long-calls
--@opindex mlong-calls
--@opindex mno-long-calls
--Tells the compiler to perform function calls by first loading the
--address of the function into a register and then performing a subroutine
--call on this register. This switch is needed if the target function
--will lie outside of the 24 bit addressing range of the offset based
--version of subroutine call instruction.
++
++@item -mimm-in-const-pool
++@opindex mimm-in-const-pool
++Put large immediates in constant pool. This is enabled by default for archs with insn-cache.
-This feature is not enabled by default. Specifying
-@option{-mno-long-calls} will restore the default behavior. Note these
@end table
@node CRIS Options
-@@ -11879,6 +11858,7 @@
- Application Binary Interface, PowerPC processor supplement. This is the
- default unless you configured GCC using @samp{powerpc-*-eabiaix}.
-
-+
- @item -mcall-sysv-eabi
- @opindex mcall-sysv-eabi
- Specify both @option{-mcall-sysv} and @option{-meabi} options.
-@@ -14181,4 +14161,4 @@
- exist, because otherwise they won't get converted.
-
- @xref{Protoize Caveats}, for more information on how to use
--@code{protoize} successfully.
-+@code{protoize} successfully.
-\ No newline at end of file
-Index: gcc-4.2.3/gcc/doc/md.texi
-===================================================================
---- gcc-4.2.3.orig/gcc/doc/md.texi 2007-04-04 03:24:10.000000000 +0200
-+++ gcc-4.2.3/gcc/doc/md.texi 2008-05-21 13:45:54.302287723 +0200
-@@ -3,6 +3,7 @@
- @c This is part of the GCC manual.
- @c For copying conditions, see the file gcc.texi.
-
-+
- @ifset INTERNALS
- @node Machine Desc
- @chapter Machine Descriptions
-@@ -1681,6 +1682,58 @@
+--- a/gcc/doc/md.texi
++++ b/gcc/doc/md.texi
+@@ -1681,6 +1681,80 @@ A memory reference suitable for iWMMXt l
A memory reference suitable for the ARMv4 ldrsb instruction.
@end table
+@item AVR32 family---@file{avr32.h}
+@table @code
+@item f
-+Floating-point registers (f0 to f15)
++Floating-point registers (f0 to f15) (Reserved for future use)
+
+@item Ku@var{bits}
+Unsigned constant representable with @var{bits} number of bits (Must be
+values in the least and most significant words both are in the range
+@math{-2^{20}} to @math{2^{20}-1}.
+
++@item M
++Any 32-bit immediate with the most significant bits set to zero and the
++remaining least significant bits set to one.
++
++@item J
++A 32-bit immediate where all the lower 16-bits are zero.
++
++@item O
++A 32-bit immediate with one bit set and the rest of the bits cleared.
++
++@item N
++A 32-bit immediate with one bit cleared and the rest of the bits set.
++
++@item L
++A 32-bit immediate where all the lower 16-bits are set.
++
++@item Q
++Any AVR32 memory reference except for reference used for the atomic memory (RMW) instructions.
++
+@item RKs@var{bits}
+A memory reference where the address consists of a base register
+plus a signed immediate displacement with range given by @samp{Ks@var{bits}}
+A valid operand for use in the @samp{lda.w} instruction macro when
+relaxing is enabled
+
++@item Y
++A memory reference suitable for the atomic memory (RMW) instructions.
++
+@item Z
+A memory reference valid for coprocessor memory instructions
+
@item AVR family---@file{config/avr/constraints.md}
@table @code
@item l
-Index: gcc-4.2.3/gcc/expmed.c
-===================================================================
---- gcc-4.2.3.orig/gcc/expmed.c 2007-09-01 17:28:30.000000000 +0200
-+++ gcc-4.2.3/gcc/expmed.c 2008-05-21 13:45:54.306288512 +0200
-@@ -36,6 +36,7 @@
+--- a/gcc/expmed.c
++++ b/gcc/expmed.c
+@@ -36,6 +36,7 @@ along with GCC; see the file COPYING3.
#include "real.h"
#include "recog.h"
#include "langhooks.h"
static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT,
-@@ -454,9 +455,18 @@
+@@ -454,9 +455,19 @@ store_bit_field (rtx str_rtx, unsigned H
? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
|| GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
&& byte_offset % GET_MODE_SIZE (fieldmode) == 0)
+ consistent with the container type. */
+ !(MEM_VOLATILE_P (op0)
+ && GET_MODE_BITSIZE (GET_MODE (op0)) != bitsize
++ && bitsize < BITS_PER_WORD
+ && !targetm.narrow_volatile_bitfield ())
+ && (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
+ || (offset * BITS_PER_UNIT % bitsize == 0
{
if (MEM_P (op0))
op0 = adjust_address (op0, fieldmode, offset);
-@@ -1256,6 +1266,13 @@
+@@ -1256,6 +1267,13 @@ extract_bit_field (rtx str_rtx, unsigned
&& GET_MODE_SIZE (mode1) != 0
&& byte_offset % GET_MODE_SIZE (mode1) == 0)
|| (MEM_P (op0)
&& (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
|| (offset * BITS_PER_UNIT % bitsize == 0
&& MEM_ALIGN (op0) % bitsize == 0)))))
-Index: gcc-4.2.3/gcc/expr.c
-===================================================================
---- gcc-4.2.3.orig/gcc/expr.c 2008-01-22 20:39:39.000000000 +0100
-+++ gcc-4.2.3/gcc/expr.c 2008-05-21 13:45:54.330288217 +0200
-@@ -3549,18 +3549,19 @@
+--- a/gcc/expr.c
++++ b/gcc/expr.c
+@@ -3519,18 +3519,19 @@ emit_single_push_insn (enum machine_mode
}
else
{
dest = gen_rtx_MEM (mode, dest_addr);
if (type != 0)
-@@ -5539,7 +5540,16 @@
+@@ -5509,7 +5510,21 @@ store_field (rtx target, HOST_WIDE_INT b
is a bit field, we cannot use addressing to access it.
Use bit-field techniques or SUBREG to store in it. */
+ consistent with the container type. */
+ (MEM_P (target)
+ && MEM_VOLATILE_P (target)
-+ && GET_MODE_BITSIZE (GET_MODE (target)) > bitsize
++ && ((GET_MODE (target) != BLKmode
++ && GET_MODE_BITSIZE (GET_MODE (target)) > bitsize )
++ /* If BLKmode, check if this is a record. Do not know
++ if this is really necessary though...*/
++ || (GET_MODE (target) == BLKmode
++ && TREE_CODE (type) == RECORD_TYPE))
+ && !targetm.narrow_volatile_bitfield ())
+ || mode == VOIDmode
|| (mode != BLKmode && ! direct_store[(int) mode]
&& GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
&& GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
-@@ -7590,7 +7600,16 @@
+@@ -7560,7 +7575,21 @@ expand_expr_real_1 (tree exp, rtx target
by doing the extract into an object as wide as the field
(which we know to be the width of a basic mode), then
storing into memory, and changing the mode to BLKmode. */
+ consistent with the container type. */
+ (MEM_P (op0)
+ && MEM_VOLATILE_P (op0)
-+ && GET_MODE_BITSIZE (GET_MODE (op0)) > bitsize
++ && ((GET_MODE (op0) != BLKmode
++ && GET_MODE_BITSIZE (GET_MODE (op0)) > bitsize )
++ /* If BLKmode, check if this is a record. Do not know
++ if this is really necessary though...*/
++ || (GET_MODE (op0) == BLKmode
++ && TREE_CODE (type) == RECORD_TYPE))
+ && !targetm.narrow_volatile_bitfield ())
+ || mode1 == VOIDmode
|| REG_P (op0) || GET_CODE (op0) == SUBREG
|| (mode1 != BLKmode && ! direct_load[(int) mode1]
&& GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
-Index: gcc-4.2.3/gcc/function.c
-===================================================================
---- gcc-4.2.3.orig/gcc/function.c 2007-09-01 17:28:30.000000000 +0200
-+++ gcc-4.2.3/gcc/function.c 2008-05-21 13:45:54.334287888 +0200
-@@ -2676,8 +2676,12 @@
+--- a/gcc/flow.c
++++ b/gcc/flow.c
+@@ -3327,12 +3327,12 @@ not_reg_cond (rtx x)
+ if (GET_CODE (x) == NOT)
+ return XEXP (x, 0);
+ if (COMPARISON_P (x)
+- && REG_P (XEXP (x, 0)))
++ /* && REG_P (XEXP (x, 0))*/)
+ {
+- gcc_assert (XEXP (x, 1) == const0_rtx);
++ /*gcc_assert (XEXP (x, 1) == const0_rtx);*/
+
+ return gen_rtx_fmt_ee (reversed_comparison_code (x, NULL),
+- VOIDmode, XEXP (x, 0), const0_rtx);
++ VOIDmode, XEXP (x, 0), XEXP (x, 0) /*const0_rtx*/);
+ }
+ return gen_rtx_NOT (0, x);
+ }
+--- a/gcc/function.c
++++ b/gcc/function.c
+@@ -2676,8 +2676,12 @@ assign_parm_setup_reg (struct assign_par
SET_DECL_RTL (parm, parmreg);
/* Copy the value into the register. */
{
int save_tree_used;
-Index: gcc-4.2.3/gcc/genemit.c
-===================================================================
---- gcc-4.2.3.orig/gcc/genemit.c 2007-09-01 17:28:30.000000000 +0200
-+++ gcc-4.2.3/gcc/genemit.c 2008-05-21 13:45:54.342288908 +0200
-@@ -121,6 +121,24 @@
+--- a/gcc/genemit.c
++++ b/gcc/genemit.c
+@@ -121,6 +121,24 @@ max_operand_vec (rtx insn, int arg)
}
\f
static void
print_code (RTX_CODE code)
{
const char *p1;
-@@ -405,18 +423,16 @@
+@@ -405,18 +423,16 @@ gen_insn (rtx insn, int lineno)
fatal ("match_dup operand number has no match_operand");
/* Output the function name and argument declarations. */
/* Output code to construct and return the rtl for the instruction body. */
if (XVECLEN (insn, 1) == 1)
-@@ -456,16 +472,12 @@
+@@ -456,16 +472,12 @@ gen_expand (rtx expand)
operands = max_operand_vec (expand, 1);
/* Output the function name and argument declarations. */
printf ("{\n");
/* If we don't have any C code to write, only one insn is being written,
-@@ -475,6 +487,8 @@
+@@ -475,6 +487,8 @@ gen_expand (rtx expand)
&& operands > max_dup_opno
&& XVECLEN (expand, 1) == 1)
{
printf (" return ");
gen_exp (XVECEXP (expand, 1, 0), DEFINE_EXPAND, NULL);
printf (";\n}\n\n");
-@@ -488,6 +502,7 @@
+@@ -488,6 +502,7 @@ gen_expand (rtx expand)
for (; i <= max_scratch_opno; i++)
printf (" rtx operand%d ATTRIBUTE_UNUSED;\n", i);
printf (" rtx _val = 0;\n");
printf (" start_sequence ();\n");
/* The fourth operand of DEFINE_EXPAND is some code to be executed
-Index: gcc-4.2.3/gcc/genflags.c
-===================================================================
---- gcc-4.2.3.orig/gcc/genflags.c 2007-09-01 17:28:30.000000000 +0200
-+++ gcc-4.2.3/gcc/genflags.c 2008-05-21 13:45:54.346288020 +0200
-@@ -127,7 +127,6 @@
+--- a/gcc/genflags.c
++++ b/gcc/genflags.c
+@@ -127,7 +127,6 @@ static void
gen_proto (rtx insn)
{
int num = num_operands (insn);
const char *name = XSTR (insn, 0);
int truth = maybe_eval_c_test (XSTR (insn, 2));
-@@ -158,12 +157,7 @@
+@@ -158,12 +157,7 @@ gen_proto (rtx insn)
if (num == 0)
fputs ("void", stdout);
else
puts (");");
-@@ -173,12 +167,7 @@
+@@ -173,12 +167,7 @@ gen_proto (rtx insn)
{
printf ("static inline rtx\ngen_%s", name);
if (num > 0)
else
puts ("(void)");
puts ("{\n return 0;\n}");
-Index: gcc-4.2.3/gcc/genoutput.c
-===================================================================
---- gcc-4.2.3.orig/gcc/genoutput.c 2007-09-01 17:28:30.000000000 +0200
-+++ gcc-4.2.3/gcc/genoutput.c 2008-05-21 13:45:54.354290436 +0200
-@@ -386,7 +386,7 @@
+--- a/gcc/genoutput.c
++++ b/gcc/genoutput.c
+@@ -386,7 +386,7 @@ output_insn_data (void)
}
if (d->name && d->name[0] != '*')
else
printf (" 0,\n");
-Index: gcc-4.2.3/gcc/ifcvt.c
-===================================================================
---- gcc-4.2.3.orig/gcc/ifcvt.c 2007-10-30 05:32:06.000000000 +0100
-+++ gcc-4.2.3/gcc/ifcvt.c 2008-05-21 13:45:54.358287873 +0200
-@@ -1050,7 +1050,11 @@
+--- a/gcc/ifcvt.c
++++ b/gcc/ifcvt.c
+@@ -77,7 +77,7 @@ static int num_possible_if_blocks;
+ static int num_updated_if_blocks;
+
+ /* # of changes made which require life information to be updated. */
+-static int num_true_changes;
++int num_true_changes;
+
+ /* Whether conditional execution changes were made. */
+ static int cond_exec_changed_p;
+@@ -287,12 +287,15 @@ cond_exec_process_insns (ce_if_block_t *
+ if (must_be_last)
+ return FALSE;
+
+- if (modified_in_p (test, insn))
+- {
+- if (!mod_ok)
+- return FALSE;
+- must_be_last = TRUE;
+- }
++#ifdef IFCVT_ALLOW_MODIFY_TEST_IN_INSN
++ if ( !IFCVT_ALLOW_MODIFY_TEST_IN_INSN )
++#endif
++ if (modified_in_p (test, insn))
++ {
++ if (!mod_ok)
++ return FALSE;
++ must_be_last = TRUE;
++ }
+
+ /* Now build the conditional form of the instruction. */
+ pattern = PATTERN (insn);
+@@ -566,16 +569,19 @@ cond_exec_process_if_block (ce_if_block_
+ /* Do any machine dependent final modifications. */
+ IFCVT_MODIFY_FINAL (ce_info);
+ #endif
+-
+- /* Conversion succeeded. */
+- if (dump_file)
+- fprintf (dump_file, "%d insn%s converted to conditional execution.\n",
+- n_insns, (n_insns == 1) ? " was" : "s were");
+-
++
+ /* Merge the blocks! */
+- merge_if_block (ce_info);
+- cond_exec_changed_p = TRUE;
+- return TRUE;
++ if ( reload_completed ){
++ /* Conversion succeeded. */
++ if (dump_file)
++ fprintf (dump_file, "%d insn%s converted to conditional execution.\n",
++ n_insns, (n_insns == 1) ? " was" : "s were");
++
++ merge_if_block (ce_info);
++ cond_exec_changed_p = TRUE;
++ return TRUE;
++ }
++ return FALSE;
+
+ fail:
+ #ifdef IFCVT_MODIFY_CANCEL
+@@ -1050,7 +1056,11 @@ noce_try_addcc (struct noce_if_info *if_
!= UNKNOWN))
{
rtx cond = if_info->cond;
/* First try to use addcc pattern. */
if (general_operand (XEXP (cond, 0), VOIDmode)
-Index: gcc-4.2.3/gcc/longlong.h
-===================================================================
---- gcc-4.2.3.orig/gcc/longlong.h 2007-09-01 17:28:30.000000000 +0200
-+++ gcc-4.2.3/gcc/longlong.h 2008-05-21 13:45:54.358287873 +0200
-@@ -226,6 +226,39 @@
+@@ -2651,7 +2661,12 @@ process_if_block (struct ce_if_block * c
+ && cond_move_process_if_block (ce_info))
+ return TRUE;
+
+- if (HAVE_conditional_execution && reload_completed)
++ if (HAVE_conditional_execution &&
++#ifdef IFCVT_COND_EXEC_BEFORE_RELOAD
++ (reload_completed || IFCVT_COND_EXEC_BEFORE_RELOAD))
++#else
++ reload_completed)
++#endif
+ {
+ /* If we have && and || tests, try to first handle combining the && and
+ || tests into the conditional code, and if that fails, go back and
+@@ -4036,6 +4051,15 @@ rest_of_handle_if_after_reload (void)
+ cleanup_cfg (CLEANUP_EXPENSIVE
+ | CLEANUP_UPDATE_LIFE
+ | (flag_crossjumping ? CLEANUP_CROSSJUMP : 0));
++
++ /* Hack for the AVR32 experimental ifcvt processing before reload.
++ The AVR32 specific ifcvt code needs to know when ifcvt after reload
++ has begun. */
++#ifdef IFCVT_COND_EXEC_BEFORE_RELOAD
++ if ( IFCVT_COND_EXEC_BEFORE_RELOAD )
++ cfun->machine->ifcvt_after_reload = 1;
++#endif
++
+ if (flag_if_conversion2)
+ if_convert (1);
+ return 0;
+--- a/gcc/longlong.h
++++ b/gcc/longlong.h
+@@ -226,6 +226,41 @@ UDItype __umulsidi3 (USItype, USItype);
#define UDIV_TIME 100
#endif /* __arm__ */
+ "r" ((USItype) (al)), \
+ "r" ((USItype) (bl)) __CLOBBER_CC)
+
++#if !defined (__AVR32_UC__) || __AVR32_UC__ != 3
+#define __umulsidi3(a,b) ((UDItype)(a) * (UDItype)(b))
+
+#define umul_ppmm(w1, w0, u, v) \
+ w1 = __w.s.high; \
+ w0 = __w.s.low; \
+}
++#endif
+
+#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clz (X))
+#define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctz (X))
#if defined (__hppa) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("add %4,%5,%1\n\taddc %2,%3,%0" \
-Index: gcc-4.2.3/gcc/optabs.h
-===================================================================
---- gcc-4.2.3.orig/gcc/optabs.h 2007-09-01 17:28:30.000000000 +0200
-+++ gcc-4.2.3/gcc/optabs.h 2008-05-21 13:45:54.358287873 +0200
-@@ -431,7 +431,7 @@
+--- a/gcc/optabs.h
++++ b/gcc/optabs.h
+@@ -431,7 +431,7 @@ extern enum insn_code reload_out_optab[N
extern GTY(()) optab code_to_optab[NUM_RTX_CODE + 1];
\f
/* Indexed by the rtx-code for a conditional (e.g. EQ, LT,...)
gives the gen_function to make a branch to test that condition. */
-Index: gcc-4.2.3/gcc/testsuite/gcc.dg/cpp/mac-eol-at-eof.c
-===================================================================
---- gcc-4.2.3.orig/gcc/testsuite/gcc.dg/cpp/mac-eol-at-eof.c 2005-02-19 20:48:02.000000000 +0100
-+++ gcc-4.2.3/gcc/testsuite/gcc.dg/cpp/mac-eol-at-eof.c 2008-05-21 13:45:54.358287873 +0200
-@@ -1 +1,3 @@
--/* Test no newline at eof warning when Mac line ending is used*/\r/* { dg-do compile } */\rint main() { return 0; } \r
-\ No newline at end of file
-+/* Test no newline at eof warning when Mac line ending is used*/
-+/* { dg-do compile } */
-+int main() { return 0; }
-Index: gcc-4.2.3/gcc/testsuite/gcc.dg/sibcall-3.c
-===================================================================
---- gcc-4.2.3.orig/gcc/testsuite/gcc.dg/sibcall-3.c 2005-07-20 08:39:38.000000000 +0200
-+++ gcc-4.2.3/gcc/testsuite/gcc.dg/sibcall-3.c 2008-05-21 13:45:54.358287873 +0200
+--- a/gcc/regrename.c
++++ b/gcc/regrename.c
+@@ -1592,6 +1592,9 @@ copyprop_hardreg_forward_1 (basic_block
+ bool changed = false;
+ rtx insn;
+
++ rtx prev_pred_test;
++ int prev_pred_insn_skipped = 0;
++
+ for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
+ {
+ int n_ops, i, alt, predicated;
+@@ -1630,7 +1633,60 @@ copyprop_hardreg_forward_1 (basic_block
+ || (predicated && recog_data.operand_type[i] == OP_OUT))
+ recog_data.operand_type[i] = OP_INOUT;
+ }
++
+
++ /* Added for targets (AVR32) which supports test operands to be modified
++ in cond_exec instruction. For these targets we cannot make a change to
++ the test operands if one of the test operands is an output operand This beacuse
++ changing the test operands might cause the need for inserting a new test
++ insns in the middle of a sequence of cond_exec insns and if the test operands
++ are modified these tests will fail.
++ */
++
++ if ( IFCVT_ALLOW_MODIFY_TEST_IN_INSN
++ && predicated )
++ {
++ int insn_skipped = 0;
++ rtx test = COND_EXEC_TEST (PATTERN (insn));
++
++ /* Check if the previous insn was a skipped predicated insn with the same
++ test as this predicated insn. If so we cannot do any modification to
++ this insn either since we cannot emit the test insn because the operands
++ are clobbered. */
++ if ( prev_pred_insn_skipped
++ && (rtx_equal_p (test, prev_pred_test)
++ || rtx_equal_p (test, reversed_condition (prev_pred_test))) )
++ {
++ insn_skipped = 1;
++ }
++ else
++ {
++ /* Check if the output operand is used in the test expression. */
++ for (i = 0; i < n_ops; ++i)
++ if ( recog_data.operand_type[i] == OP_INOUT
++ && reg_mentioned_p (recog_data.operand[i], test) )
++ {
++ insn_skipped = 1;
++ break;
++ }
++
++ }
++
++ prev_pred_test = test;
++ prev_pred_insn_skipped = insn_skipped;
++ if ( insn_skipped )
++ {
++ if (insn == BB_END (bb))
++ break;
++ else
++ continue;
++ }
++ }
++ else
++ {
++ prev_pred_insn_skipped = 0;
++ }
++
+ /* For each earlyclobber operand, zap the value data. */
+ for (i = 0; i < n_ops; i++)
+ if (recog_op_alt[i][alt].earlyclobber)
+--- a/gcc/sched-deps.c
++++ b/gcc/sched-deps.c
+@@ -649,7 +649,14 @@ fixup_sched_groups (rtx insn)
+
+ prev_nonnote = prev_nonnote_insn (insn);
+ if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
+- && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
++ /* Modification for AVR32 by RP: Why is this here, this will
++ cause instruction to be without any dependencies which might
++ cause it to be moved anywhere. For the AVR32 we try to keep
++ a group of conditionals together even if they are mutually exclusive.
++ */
++ && (! sched_insns_conditions_mutex_p (insn, prev_nonnote)
++ || GET_CODE (PATTERN (insn)) == COND_EXEC )
++ )
+ add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
+ }
+ \f
+@@ -1123,8 +1130,29 @@ sched_analyze_insn (struct deps *deps, r
+
+ if (code == COND_EXEC)
+ {
++#ifdef IFCVT_ALLOW_MODIFY_TEST_IN_INSN
++ if (IFCVT_ALLOW_MODIFY_TEST_IN_INSN)
++ {
++ /* Check if we have a group of conditional instructions with the same test.
++ If so we must make sure that they are not scheduled apart in order to
++ avoid unnecessary tests and if one of the registers in the test is modified
++ in the instruction this is needed to ensure correct code. */
++ if ( prev_nonnote_insn (insn)
++ && INSN_P (prev_nonnote_insn (insn))
++ && GET_CODE (PATTERN (prev_nonnote_insn (insn))) == COND_EXEC
++ && rtx_equal_p (XEXP(COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn))), 0), XEXP (COND_EXEC_TEST (x), 0))
++ && rtx_equal_p (XEXP(COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn))), 1), XEXP (COND_EXEC_TEST (x), 1))
++ && ( GET_CODE (COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn)))) == GET_CODE (COND_EXEC_TEST (x))
++ || GET_CODE (COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn)))) == reversed_comparison_code (COND_EXEC_TEST (x), insn)))
++ {
++ SCHED_GROUP_P (insn) = 1;
++ //CANT_MOVE (prev_nonnote_insn (insn)) = 1;
++ }
++ }
++#endif
+ sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
+
++
+ /* ??? Should be recording conditions so we reduce the number of
+ false dependencies. */
+ x = COND_EXEC_CODE (x);
+--- a/gcc/testsuite/gcc.dg/sibcall-3.c
++++ b/gcc/testsuite/gcc.dg/sibcall-3.c
@@ -5,7 +5,7 @@
Copyright (C) 2002 Free Software Foundation Inc.
Contributed by Hans-Peter Nilsson <hp@bitrange.com> */
/* { dg-options "-O2 -foptimize-sibling-calls" } */
/* The option -foptimize-sibling-calls is the default, but serves as
-Index: gcc-4.2.3/gcc/testsuite/gcc.dg/sibcall-4.c
-===================================================================
---- gcc-4.2.3.orig/gcc/testsuite/gcc.dg/sibcall-4.c 2005-07-20 08:39:38.000000000 +0200
-+++ gcc-4.2.3/gcc/testsuite/gcc.dg/sibcall-4.c 2008-05-21 13:45:54.362288662 +0200
+--- a/gcc/testsuite/gcc.dg/sibcall-4.c
++++ b/gcc/testsuite/gcc.dg/sibcall-4.c
@@ -5,7 +5,7 @@
Copyright (C) 2002 Free Software Foundation Inc.
Contributed by Hans-Peter Nilsson <hp@bitrange.com> */
/* { dg-options "-O2 -foptimize-sibling-calls" } */
/* The option -foptimize-sibling-calls is the default, but serves as
-Index: gcc-4.2.3/gcc/testsuite/gcc.dg/trampoline-1.c
-===================================================================
---- gcc-4.2.3.orig/gcc/testsuite/gcc.dg/trampoline-1.c 2004-08-03 10:22:26.000000000 +0200
-+++ gcc-4.2.3/gcc/testsuite/gcc.dg/trampoline-1.c 2008-05-21 13:45:54.362288662 +0200
-@@ -46,6 +46,8 @@
+--- a/gcc/testsuite/gcc.dg/trampoline-1.c
++++ b/gcc/testsuite/gcc.dg/trampoline-1.c
+@@ -46,6 +46,8 @@ void foo (void)
int main (void)
{
+#endif
return 0;
}
-Index: gcc-4.2.3/gcc/testsuite/gfortran.dg/char_pointer_assign.f90
-===================================================================
---- gcc-4.2.3.orig/gcc/testsuite/gfortran.dg/char_pointer_assign.f90 2005-05-29 18:03:43.000000000 +0200
-+++ gcc-4.2.3/gcc/testsuite/gfortran.dg/char_pointer_assign.f90 2008-05-21 13:45:54.362288662 +0200
-@@ -1,4 +1,4 @@
--! { dg-do run }
-+! { dg-do run }\r
- program char_pointer_assign\r
- ! Test character pointer assignments, required\r
- ! to fix PR18890 and PR21297\r
-@@ -8,7 +8,7 @@
- character*4, target :: t2(4) =(/"lmno","lmno","lmno","lmno"/)\r
- character*4 :: const\r
- character*4, pointer :: c1, c3\r
-- character*4, pointer :: c2(:), c4(:)
-+ character*4, pointer :: c2(:), c4(:)\r
- allocate (c3, c4(4))\r
- ! Scalars first.\r
- c3 = "lmno" ! pointer = constant\r
-@@ -24,13 +24,13 @@
- \r
- ! Now arrays.\r
- c4 = "lmno" ! pointer = constant\r
-- t2 = c4 ! target = pointer
-- c2 => t2 ! pointer =>target
-- const = c2(1)
-+ t2 = c4 ! target = pointer\r
-+ c2 => t2 ! pointer =>target\r
-+ const = c2(1)\r
- const(2:3) ="nm" ! c2(:)(2:3) = "nm" is still broken\r
- c2 = const\r
- c4 = c2 ! pointer = pointer\r
-- const = c4(1)
-+ const = c4(1)\r
- const(1:1) ="o" ! c4(:)(1:1) = "o" is still broken\r
- const(4:4) ="l" ! c4(:)(4:4) = "l" is still broken\r
- c4 = const\r
-Index: gcc-4.2.3/gcc/testsuite/g++.old-deja/g++.pt/static11.C
-===================================================================
---- gcc-4.2.3.orig/gcc/testsuite/g++.old-deja/g++.pt/static11.C 2006-02-22 10:05:07.000000000 +0100
-+++ gcc-4.2.3/gcc/testsuite/g++.old-deja/g++.pt/static11.C 2008-05-21 13:45:54.362288662 +0200
+--- a/gcc/testsuite/g++.old-deja/g++.pt/static11.C
++++ b/gcc/testsuite/g++.old-deja/g++.pt/static11.C
@@ -2,7 +2,7 @@
// in their dejagnu baseboard description) require that the status is
// final when exit is entered (or main returns), and not "overruled" by a
// Bug: g++ was failing to destroy C<int>::a because it was using two
// different sentry variables for construction and destruction.
-Index: gcc-4.2.3/libjava/classpath/external/relaxngDatatype/copying.txt
-===================================================================
---- gcc-4.2.3.orig/libjava/classpath/external/relaxngDatatype/copying.txt 2006-03-10 14:25:35.000000000 +0100
-+++ gcc-4.2.3/libjava/classpath/external/relaxngDatatype/copying.txt 2008-05-21 13:45:54.362288662 +0200
-@@ -1,30 +1,30 @@
--Copyright (c) 2001, Thai Open Source Software Center Ltd, Sun Microsystems.\r
--All rights reserved.\r
--\r
--Redistribution and use in source and binary forms, with or without\r
--modification, are permitted provided that the following conditions are\r
--met:\r
--\r
-- Redistributions of source code must retain the above copyright\r
-- notice, this list of conditions and the following disclaimer.\r
--\r
-- Redistributions in binary form must reproduce the above copyright\r
-- notice, this list of conditions and the following disclaimer in\r
-- the documentation and/or other materials provided with the\r
-- distribution.\r
--\r
-- Neither the names of the copyright holders nor the names of its\r
-- contributors may be used to endorse or promote products derived\r
-- from this software without specific prior written permission.\r
--\r
--THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r
--"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r
--LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
--A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR\r
--CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\r
--EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\r
--PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\r
--PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\r
--LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\r
--NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\r
--SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
-+Copyright (c) 2001, Thai Open Source Software Center Ltd, Sun Microsystems.
-+All rights reserved.
-+
-+Redistribution and use in source and binary forms, with or without
-+modification, are permitted provided that the following conditions are
-+met:
-+
-+ Redistributions of source code must retain the above copyright
-+ notice, this list of conditions and the following disclaimer.
-+
-+ Redistributions in binary form must reproduce the above copyright
-+ notice, this list of conditions and the following disclaimer in
-+ the documentation and/or other materials provided with the
-+ distribution.
-+
-+ Neither the names of the copyright holders nor the names of its
-+ contributors may be used to endorse or promote products derived
-+ from this software without specific prior written permission.
-+
-+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
-+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-Index: gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/DatatypeBuilder.java
-===================================================================
---- gcc-4.2.3.orig/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/DatatypeBuilder.java 2006-03-10 14:25:35.000000000 +0100
-+++ gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/DatatypeBuilder.java 2008-05-21 13:45:54.362288662 +0200
-@@ -1,45 +1,45 @@
--package org.relaxng.datatype;\r
--\r
--/**\r
-- * Creates a user-defined type by adding parameters to\r
-- * the pre-defined type.\r
-- * \r
-- * @author <a href="mailto:jjc@jclark.com">James Clark</a>\r
-- * @author <a href="mailto:kohsuke.kawaguchi@sun.com">Kohsuke KAWAGUCHI</a>\r
-- */\r
--public interface DatatypeBuilder {\r
-- \r
-- /**\r
-- * Adds a new parameter.\r
-- *\r
-- * @param name\r
-- * The name of the parameter to be added.\r
-- * @param strValue\r
-- * The raw value of the parameter. Caller may not normalize\r
-- * this value because any white space is potentially significant.\r
-- * @param context\r
-- * The context information which can be used by the callee to\r
-- * acquire additional information. This context object is\r
-- * valid only during this method call. The callee may not\r
-- * keep a reference to this object.\r
-- * @exception DatatypeException\r
-- * When the given parameter is inappropriate for some reason.\r
-- * The callee is responsible to recover from this error.\r
-- * That is, the object should behave as if no such error\r
-- * was occured.\r
-- */\r
-- void addParameter( String name, String strValue, ValidationContext context )\r
-- throws DatatypeException;\r
-- \r
-- /**\r
-- * Derives a new Datatype from a Datatype by parameters that\r
-- * were already set through the addParameter method.\r
-- * \r
-- * @exception DatatypeException\r
-- * DatatypeException must be thrown if the derivation is\r
-- * somehow invalid. For example, a required parameter is missing,\r
-- * etc. The exception should contain a diagnosis message\r
-- * if possible.\r
-- */\r
-- Datatype createDatatype() throws DatatypeException;\r
--}\r
-+package org.relaxng.datatype;
-+
-+/**
-+ * Creates a user-defined type by adding parameters to
-+ * the pre-defined type.
-+ *
-+ * @author <a href="mailto:jjc@jclark.com">James Clark</a>
-+ * @author <a href="mailto:kohsuke.kawaguchi@sun.com">Kohsuke KAWAGUCHI</a>
-+ */
-+public interface DatatypeBuilder {
-+
-+ /**
-+ * Adds a new parameter.
-+ *
-+ * @param name
-+ * The name of the parameter to be added.
-+ * @param strValue
-+ * The raw value of the parameter. Caller may not normalize
-+ * this value because any white space is potentially significant.
-+ * @param context
-+ * The context information which can be used by the callee to
-+ * acquire additional information. This context object is
-+ * valid only during this method call. The callee may not
-+ * keep a reference to this object.
-+ * @exception DatatypeException
-+ * When the given parameter is inappropriate for some reason.
-+ * The callee is responsible to recover from this error.
-+ * That is, the object should behave as if no such error
-+ * was occured.
-+ */
-+ void addParameter( String name, String strValue, ValidationContext context )
-+ throws DatatypeException;
-+
-+ /**
-+ * Derives a new Datatype from a Datatype by parameters that
-+ * were already set through the addParameter method.
-+ *
-+ * @exception DatatypeException
-+ * DatatypeException must be thrown if the derivation is
-+ * somehow invalid. For example, a required parameter is missing,
-+ * etc. The exception should contain a diagnosis message
-+ * if possible.
-+ */
-+ Datatype createDatatype() throws DatatypeException;
-+}
-Index: gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/DatatypeException.java
-===================================================================
---- gcc-4.2.3.orig/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/DatatypeException.java 2006-03-10 14:25:35.000000000 +0100
-+++ gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/DatatypeException.java 2008-05-21 13:45:54.366287774 +0200
-@@ -1,39 +1,39 @@
--package org.relaxng.datatype;\r
--\r
--/**\r
-- * Signals Datatype related exceptions.\r
-- * \r
-- * @author <a href="mailto:jjc@jclark.com">James Clark</a>\r
-- * @author <a href="mailto:kohsuke.kawaguchi@sun.com">Kohsuke KAWAGUCHI</a>\r
-- */\r
--public class DatatypeException extends Exception {\r
-- \r
-- public DatatypeException( int index, String msg ) {\r
-- super(msg);\r
-- this.index = index;\r
-- }\r
-- public DatatypeException( String msg ) {\r
-- this(UNKNOWN,msg);\r
-- }\r
-- /**\r
-- * A constructor for those datatype libraries which don't support any\r
-- * diagnostic information at all.\r
-- */\r
-- public DatatypeException() {\r
-- this(UNKNOWN,null);\r
-- }\r
-- \r
-- \r
-- private final int index;\r
-- \r
-- public static final int UNKNOWN = -1;\r
--\r
-- /**\r
-- * Gets the index of the content where the error occured.\r
-- * UNKNOWN can be returned to indicate that no index information\r
-- * is available.\r
-- */\r
-- public int getIndex() {\r
-- return index;\r
-- }\r
--}\r
-+package org.relaxng.datatype;
-+
-+/**
-+ * Signals Datatype related exceptions.
-+ *
-+ * @author <a href="mailto:jjc@jclark.com">James Clark</a>
-+ * @author <a href="mailto:kohsuke.kawaguchi@sun.com">Kohsuke KAWAGUCHI</a>
-+ */
-+public class DatatypeException extends Exception {
-+
-+ public DatatypeException( int index, String msg ) {
-+ super(msg);
-+ this.index = index;
-+ }
-+ public DatatypeException( String msg ) {
-+ this(UNKNOWN,msg);
-+ }
-+ /**
-+ * A constructor for those datatype libraries which don't support any
-+ * diagnostic information at all.
-+ */
-+ public DatatypeException() {
-+ this(UNKNOWN,null);
-+ }
-+
-+
-+ private final int index;
-+
-+ public static final int UNKNOWN = -1;
-+
-+ /**
-+ * Gets the index of the content where the error occured.
-+ * UNKNOWN can be returned to indicate that no index information
-+ * is available.
-+ */
-+ public int getIndex() {
-+ return index;
-+ }
-+}
-Index: gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/Datatype.java
-===================================================================
---- gcc-4.2.3.orig/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/Datatype.java 2006-03-10 14:25:35.000000000 +0100
-+++ gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/Datatype.java 2008-05-21 13:45:54.366287774 +0200
-@@ -1,237 +1,237 @@
--package org.relaxng.datatype;\r
--\r
--/**\r
-- * Datatype object.\r
-- * \r
-- * This object has the following functionality:\r
-- * \r
-- * <ol>\r
-- * <li> functionality to identify a class of character sequences. This is\r
-- * done through the isValid method.\r
-- * \r
-- * <li> functionality to produce a "value object" from a character sequence and\r
-- * context information.\r
-- * \r
-- * <li> functionality to test the equality of two value objects.\r
-- * </ol>\r
-- * \r
-- * This interface also defines the createStreamingValidator method,\r
-- * which is intended to efficiently support the validation of\r
-- * large character sequences.\r
-- * \r
-- * @author <a href="mailto:jjc@jclark.com">James Clark</a>\r
-- * @author <a href="mailto:kohsuke.kawaguchi@sun.com">Kohsuke KAWAGUCHI</a>\r
-- */\r
--public interface Datatype {\r
-- \r
-- /**\r
-- * Checks if the specified 'literal' matches this Datatype\r
-- * with respect to the current context.\r
-- * \r
-- * @param literal\r
-- * the lexical representation to be checked.\r
-- * @param context\r
-- * If this datatype is context-dependent\r
-- * (i.e. the {@link #isContextDependent} method returns true),\r
-- * then the caller must provide a non-null valid context object.\r
-- * Otherwise, the caller can pass null.\r
-- * \r
-- * @return\r
-- * true if the 'literal' is a member of this Datatype;\r
-- * false if it's not a member of this Datatype.\r
-- */\r
-- boolean isValid( String literal, ValidationContext context );\r
-- \r
-- /**\r
-- * Similar to the isValid method but throws an exception with diagnosis\r
-- * in case of errors.\r
-- * \r
-- * <p>\r
-- * If the specified 'literal' is a valid lexical representation for this\r
-- * datatype, then this method must return without throwing any exception.\r
-- * If not, the callee must throw an exception (with diagnosis message,\r
-- * if possible.)\r
-- * \r
-- * <p>\r
-- * The application can use this method to provide detailed error message\r
-- * to users. This method is kept separate from the isValid method to\r
-- * achieve higher performance during normal validation.\r
-- * \r
-- * @exception DatatypeException\r
-- * If the given literal is invalid, then this exception is thrown.\r
-- * If the callee supports error diagnosis, then the exception should\r
-- * contain a diagnosis message.\r
-- */\r
-- void checkValid( String literal, ValidationContext context )\r
-- throws DatatypeException;\r
-- \r
-- /**\r
-- * Creates an instance of a streaming validator for this type.\r
-- * \r
-- * <p>\r
-- * By using streaming validators instead of the isValid method,\r
-- * the caller can avoid keeping the entire string, which is\r
-- * sometimes quite big, in memory.\r
-- * \r
-- * @param context\r
-- * If this datatype is context-dependent\r
-- * (i.e. the {@link #isContextDependent} method returns true),\r
-- * then the caller must provide a non-null valid context object.\r
-- * Otherwise, the caller can pass null.\r
-- * The callee may keep a reference to this context object\r
-- * only while the returned streaming validator is being used.\r
-- */\r
-- DatatypeStreamingValidator createStreamingValidator( ValidationContext context );\r
-- \r
-- /**\r
-- * Converts lexcial value and the current context to the corresponding\r
-- * value object.\r
-- * \r
-- * <p>\r
-- * The caller cannot generally assume that the value object is\r
-- * a meaningful Java object. For example, the caller cannot expect\r
-- * this method to return <code>java.lang.Number</code> type for\r
-- * the "integer" type of XML Schema Part 2.\r
-- * \r
-- * <p>\r
-- * Also, the caller cannot assume that the equals method and\r
-- * the hashCode method of the value object are consistent with\r
-- * the semantics of the datatype. For that purpose, the sameValue\r
-- * method and the valueHashCode method have to be used. Note that\r
-- * this means you cannot use classes like\r
-- * <code>java.util.Hashtable</code> to store the value objects.\r
-- * \r
-- * <p>\r
-- * The returned value object should be used solely for the sameValue\r
-- * and valueHashCode methods.\r
-- * \r
-- * @param context\r
-- * If this datatype is context-dependent\r
-- * (when the {@link #isContextDependent} method returns true),\r
-- * then the caller must provide a non-null valid context object.\r
-- * Otherwise, the caller can pass null.\r
-- * \r
-- * @return null\r
-- * when the given lexical value is not a valid lexical\r
-- * value for this type.\r
-- */\r
-- Object createValue( String literal, ValidationContext context );\r
-- \r
-- /**\r
-- * Tests the equality of two value objects which were originally\r
-- * created by the createValue method of this object.\r
-- * \r
-- * The behavior is undefined if objects not created by this type\r
-- * are passed. It is the caller's responsibility to ensure that\r
-- * value objects belong to this type.\r
-- * \r
-- * @return\r
-- * true if two value objects are considered equal according to\r
-- * the definition of this datatype; false if otherwise.\r
-- */\r
-- boolean sameValue( Object value1, Object value2 );\r
-- \r
-- \r
-- /**\r
-- * Computes the hash code for a value object,\r
-- * which is consistent with the sameValue method.\r
-- * \r
-- * @return\r
-- * hash code for the specified value object.\r
-- */\r
-- int valueHashCode( Object value );\r
--\r
--\r
--\r
-- \r
-- /**\r
-- * Indicates that the datatype doesn't have ID/IDREF semantics.\r
-- * \r
-- * This value is one of the possible return values of the\r
-- * {@link #getIdType} method.\r
-- */\r
-- public static final int ID_TYPE_NULL = 0;\r
-- \r
-- /**\r
-- * Indicates that RELAX NG compatibility processors should\r
-- * treat this datatype as having ID semantics.\r
-- * \r
-- * This value is one of the possible return values of the\r
-- * {@link #getIdType} method.\r
-- */\r
-- public static final int ID_TYPE_ID = 1;\r
-- \r
-- /**\r
-- * Indicates that RELAX NG compatibility processors should\r
-- * treat this datatype as having IDREF semantics.\r
-- * \r
-- * This value is one of the possible return values of the\r
-- * {@link #getIdType} method.\r
-- */\r
-- public static final int ID_TYPE_IDREF = 2;\r
-- \r
-- /**\r
-- * Indicates that RELAX NG compatibility processors should\r
-- * treat this datatype as having IDREFS semantics.\r
-- * \r
-- * This value is one of the possible return values of the\r
-- * {@link #getIdType} method.\r
-- */\r
-- public static final int ID_TYPE_IDREFS = 3;\r
-- \r
-- /**\r
-- * Checks if the ID/IDREF semantics is associated with this\r
-- * datatype.\r
-- * \r
-- * <p>\r
-- * This method is introduced to support the RELAX NG DTD\r
-- * compatibility spec. (Of course it's always free to use\r
-- * this method for other purposes.)\r
-- * \r
-- * <p>\r
-- * If you are implementing a datatype library and have no idea about\r
-- * the "RELAX NG DTD compatibility" thing, just return\r
-- * <code>ID_TYPE_NULL</code> is fine.\r
-- * \r
-- * @return\r
-- * If this datatype doesn't have any ID/IDREF semantics,\r
-- * it returns {@link #ID_TYPE_NULL}. If it has such a semantics\r
-- * (for example, XSD:ID, XSD:IDREF and comp:ID type), then\r
-- * it returns {@link #ID_TYPE_ID}, {@link #ID_TYPE_IDREF} or\r
-- * {@link #ID_TYPE_IDREFS}.\r
-- */\r
-- public int getIdType();\r
-- \r
-- \r
-- /**\r
-- * Checks if this datatype may need a context object for\r
-- * the validation.\r
-- * \r
-- * <p>\r
-- * The callee must return true even when the context\r
-- * is not always necessary. (For example, the "QName" type\r
-- * doesn't need a context object when validating unprefixed\r
-- * string. But nonetheless QName must return true.)\r
-- * \r
-- * <p>\r
-- * XSD's <code>string</code> and <code>short</code> types\r
-- * are examples of context-independent datatypes.\r
-- * Its <code>QName</code> and <code>ENTITY</code> types\r
-- * are examples of context-dependent datatypes.\r
-- * \r
-- * <p>\r
-- * When a datatype is context-independent, then\r
-- * the {@link #isValid} method, the {@link #checkValid} method,\r
-- * the {@link #createStreamingValidator} method and\r
-- * the {@link #createValue} method can be called without\r
-- * providing a context object.\r
-- * \r
-- * @return\r
-- * <b>true</b> if this datatype is context-dependent\r
-- * (it needs a context object sometimes);\r
-- * \r
-- * <b>false</b> if this datatype is context-<b>in</b>dependent\r
-- * (it never needs a context object).\r
-- */\r
-- public boolean isContextDependent();\r
--}\r
-+package org.relaxng.datatype;
-+
-+/**
-+ * Datatype object.
-+ *
-+ * This object has the following functionality:
-+ *
-+ * <ol>
-+ * <li> functionality to identify a class of character sequences. This is
-+ * done through the isValid method.
-+ *
-+ * <li> functionality to produce a "value object" from a character sequence and
-+ * context information.
-+ *
-+ * <li> functionality to test the equality of two value objects.
-+ * </ol>
-+ *
-+ * This interface also defines the createStreamingValidator method,
-+ * which is intended to efficiently support the validation of
-+ * large character sequences.
-+ *
-+ * @author <a href="mailto:jjc@jclark.com">James Clark</a>
-+ * @author <a href="mailto:kohsuke.kawaguchi@sun.com">Kohsuke KAWAGUCHI</a>
-+ */
-+public interface Datatype {
-+
-+ /**
-+ * Checks if the specified 'literal' matches this Datatype
-+ * with respect to the current context.
-+ *
-+ * @param literal
-+ * the lexical representation to be checked.
-+ * @param context
-+ * If this datatype is context-dependent
-+ * (i.e. the {@link #isContextDependent} method returns true),
-+ * then the caller must provide a non-null valid context object.
-+ * Otherwise, the caller can pass null.
-+ *
-+ * @return
-+ * true if the 'literal' is a member of this Datatype;
-+ * false if it's not a member of this Datatype.
-+ */
-+ boolean isValid( String literal, ValidationContext context );
-+
-+ /**
-+ * Similar to the isValid method but throws an exception with diagnosis
-+ * in case of errors.
-+ *
-+ * <p>
-+ * If the specified 'literal' is a valid lexical representation for this
-+ * datatype, then this method must return without throwing any exception.
-+ * If not, the callee must throw an exception (with diagnosis message,
-+ * if possible.)
-+ *
-+ * <p>
-+ * The application can use this method to provide detailed error message
-+ * to users. This method is kept separate from the isValid method to
-+ * achieve higher performance during normal validation.
-+ *
-+ * @exception DatatypeException
-+ * If the given literal is invalid, then this exception is thrown.
-+ * If the callee supports error diagnosis, then the exception should
-+ * contain a diagnosis message.
-+ */
-+ void checkValid( String literal, ValidationContext context )
-+ throws DatatypeException;
-+
-+ /**
-+ * Creates an instance of a streaming validator for this type.
-+ *
-+ * <p>
-+ * By using streaming validators instead of the isValid method,
-+ * the caller can avoid keeping the entire string, which is
-+ * sometimes quite big, in memory.
-+ *
-+ * @param context
-+ * If this datatype is context-dependent
-+ * (i.e. the {@link #isContextDependent} method returns true),
-+ * then the caller must provide a non-null valid context object.
-+ * Otherwise, the caller can pass null.
-+ * The callee may keep a reference to this context object
-+ * only while the returned streaming validator is being used.
-+ */
-+ DatatypeStreamingValidator createStreamingValidator( ValidationContext context );
-+
-+ /**
-+ * Converts lexcial value and the current context to the corresponding
-+ * value object.
-+ *
-+ * <p>
-+ * The caller cannot generally assume that the value object is
-+ * a meaningful Java object. For example, the caller cannot expect
-+ * this method to return <code>java.lang.Number</code> type for
-+ * the "integer" type of XML Schema Part 2.
-+ *
-+ * <p>
-+ * Also, the caller cannot assume that the equals method and
-+ * the hashCode method of the value object are consistent with
-+ * the semantics of the datatype. For that purpose, the sameValue
-+ * method and the valueHashCode method have to be used. Note that
-+ * this means you cannot use classes like
-+ * <code>java.util.Hashtable</code> to store the value objects.
-+ *
-+ * <p>
-+ * The returned value object should be used solely for the sameValue
-+ * and valueHashCode methods.
-+ *
-+ * @param context
-+ * If this datatype is context-dependent
-+ * (when the {@link #isContextDependent} method returns true),
-+ * then the caller must provide a non-null valid context object.
-+ * Otherwise, the caller can pass null.
-+ *
-+ * @return null
-+ * when the given lexical value is not a valid lexical
-+ * value for this type.
-+ */
-+ Object createValue( String literal, ValidationContext context );
-+
-+ /**
-+ * Tests the equality of two value objects which were originally
-+ * created by the createValue method of this object.
-+ *
-+ * The behavior is undefined if objects not created by this type
-+ * are passed. It is the caller's responsibility to ensure that
-+ * value objects belong to this type.
-+ *
-+ * @return
-+ * true if two value objects are considered equal according to
-+ * the definition of this datatype; false if otherwise.
-+ */
-+ boolean sameValue( Object value1, Object value2 );
-+
-+
-+ /**
-+ * Computes the hash code for a value object,
-+ * which is consistent with the sameValue method.
-+ *
-+ * @return
-+ * hash code for the specified value object.
-+ */
-+ int valueHashCode( Object value );
-+
-+
-+
-+
-+ /**
-+ * Indicates that the datatype doesn't have ID/IDREF semantics.
-+ *
-+ * This value is one of the possible return values of the
-+ * {@link #getIdType} method.
-+ */
-+ public static final int ID_TYPE_NULL = 0;
-+
-+ /**
-+ * Indicates that RELAX NG compatibility processors should
-+ * treat this datatype as having ID semantics.
-+ *
-+ * This value is one of the possible return values of the
-+ * {@link #getIdType} method.
-+ */
-+ public static final int ID_TYPE_ID = 1;
-+
-+ /**
-+ * Indicates that RELAX NG compatibility processors should
-+ * treat this datatype as having IDREF semantics.
-+ *
-+ * This value is one of the possible return values of the
-+ * {@link #getIdType} method.
-+ */
-+ public static final int ID_TYPE_IDREF = 2;
-+
-+ /**
-+ * Indicates that RELAX NG compatibility processors should
-+ * treat this datatype as having IDREFS semantics.
-+ *
-+ * This value is one of the possible return values of the
-+ * {@link #getIdType} method.
-+ */
-+ public static final int ID_TYPE_IDREFS = 3;
-+
-+ /**
-+ * Checks if the ID/IDREF semantics is associated with this
-+ * datatype.
-+ *
-+ * <p>
-+ * This method is introduced to support the RELAX NG DTD
-+ * compatibility spec. (Of course it's always free to use
-+ * this method for other purposes.)
-+ *
-+ * <p>
-+ * If you are implementing a datatype library and have no idea about
-+ * the "RELAX NG DTD compatibility" thing, just return
-+ * <code>ID_TYPE_NULL</code> is fine.
-+ *
-+ * @return
-+ * If this datatype doesn't have any ID/IDREF semantics,
-+ * it returns {@link #ID_TYPE_NULL}. If it has such a semantics
-+ * (for example, XSD:ID, XSD:IDREF and comp:ID type), then
-+ * it returns {@link #ID_TYPE_ID}, {@link #ID_TYPE_IDREF} or
-+ * {@link #ID_TYPE_IDREFS}.
-+ */
-+ public int getIdType();
-+
-+
-+ /**
-+ * Checks if this datatype may need a context object for
-+ * the validation.
-+ *
-+ * <p>
-+ * The callee must return true even when the context
-+ * is not always necessary. (For example, the "QName" type
-+ * doesn't need a context object when validating unprefixed
-+ * string. But nonetheless QName must return true.)
-+ *
-+ * <p>
-+ * XSD's <code>string</code> and <code>short</code> types
-+ * are examples of context-independent datatypes.
-+ * Its <code>QName</code> and <code>ENTITY</code> types
-+ * are examples of context-dependent datatypes.
-+ *
-+ * <p>
-+ * When a datatype is context-independent, then
-+ * the {@link #isValid} method, the {@link #checkValid} method,
-+ * the {@link #createStreamingValidator} method and
-+ * the {@link #createValue} method can be called without
-+ * providing a context object.
-+ *
-+ * @return
-+ * <b>true</b> if this datatype is context-dependent
-+ * (it needs a context object sometimes);
-+ *
-+ * <b>false</b> if this datatype is context-<b>in</b>dependent
-+ * (it never needs a context object).
-+ */
-+ public boolean isContextDependent();
-+}
-Index: gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/DatatypeLibraryFactory.java
-===================================================================
---- gcc-4.2.3.orig/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/DatatypeLibraryFactory.java 2006-03-10 14:25:35.000000000 +0100
-+++ gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/DatatypeLibraryFactory.java 2008-05-21 13:45:54.366287774 +0200
-@@ -1,26 +1,26 @@
--package org.relaxng.datatype;\r
--\r
--/**\r
-- * Factory class for the DatatypeLibrary class.\r
-- * \r
-- * <p>\r
-- * The datatype library should provide the implementation of\r
-- * this interface if it wants to be found by the schema processors.\r
-- * The implementor also have to place a file in your jar file.\r
-- * See the reference datatype library implementation for detail.\r
-- * \r
-- * @author <a href="mailto:jjc@jclark.com">James Clark</a>\r
-- * @author <a href="mailto:kohsuke.kawaguchi@sun.com">Kohsuke KAWAGUCHI</a>\r
-- */\r
--public interface DatatypeLibraryFactory\r
--{\r
-- /**\r
-- * Creates a new instance of a DatatypeLibrary that supports \r
-- * the specified namespace URI.\r
-- * \r
-- * @return\r
-- * <code>null</code> if the specified namespace URI is not\r
-- * supported. \r
-- */\r
-- DatatypeLibrary createDatatypeLibrary( String namespaceURI );\r
--}\r
-+package org.relaxng.datatype;
-+
-+/**
-+ * Factory class for the DatatypeLibrary class.
-+ *
-+ * <p>
-+ * The datatype library should provide the implementation of
-+ * this interface if it wants to be found by the schema processors.
-+ * The implementor also have to place a file in your jar file.
-+ * See the reference datatype library implementation for detail.
-+ *
-+ * @author <a href="mailto:jjc@jclark.com">James Clark</a>
-+ * @author <a href="mailto:kohsuke.kawaguchi@sun.com">Kohsuke KAWAGUCHI</a>
-+ */
-+public interface DatatypeLibraryFactory
-+{
-+ /**
-+ * Creates a new instance of a DatatypeLibrary that supports
-+ * the specified namespace URI.
-+ *
-+ * @return
-+ * <code>null</code> if the specified namespace URI is not
-+ * supported.
-+ */
-+ DatatypeLibrary createDatatypeLibrary( String namespaceURI );
-+}
-Index: gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/DatatypeLibrary.java
-===================================================================
---- gcc-4.2.3.orig/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/DatatypeLibrary.java 2006-03-10 14:25:35.000000000 +0100
-+++ gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/DatatypeLibrary.java 2008-05-21 13:45:54.366287774 +0200
-@@ -1,37 +1,37 @@
--package org.relaxng.datatype;\r
--\r
--/**\r
-- * A Datatype library\r
-- * \r
-- * @author <a href="mailto:jjc@jclark.com">James Clark</a>\r
-- * @author <a href="mailto:kohsuke.kawaguchi@sun.com">Kohsuke KAWAGUCHI</a>\r
-- */\r
--public interface DatatypeLibrary {\r
-- \r
-- /**\r
-- * Creates a new instance of DatatypeBuilder.\r
-- * \r
-- * The callee should throw a DatatypeException in case of an error.\r
-- * \r
-- * @param baseTypeLocalName\r
-- * The local name of the base type.\r
-- * \r
-- * @return\r
-- * A non-null valid datatype object.\r
-- */\r
-- DatatypeBuilder createDatatypeBuilder( String baseTypeLocalName )\r
-- throws DatatypeException;\r
-- \r
-- /**\r
-- * Gets or creates a pre-defined type.\r
-- * \r
-- * This is just a short-cut of\r
-- * <code>createDatatypeBuilder(typeLocalName).createDatatype();</code>\r
-- * \r
-- * The callee should throw a DatatypeException in case of an error.\r
-- * \r
-- * @return\r
-- * A non-null valid datatype object.\r
-- */\r
-- Datatype createDatatype( String typeLocalName ) throws DatatypeException;\r
--}\r
-+package org.relaxng.datatype;
-+
-+/**
-+ * A Datatype library
-+ *
-+ * @author <a href="mailto:jjc@jclark.com">James Clark</a>
-+ * @author <a href="mailto:kohsuke.kawaguchi@sun.com">Kohsuke KAWAGUCHI</a>
-+ */
-+public interface DatatypeLibrary {
-+
-+ /**
-+ * Creates a new instance of DatatypeBuilder.
-+ *
-+ * The callee should throw a DatatypeException in case of an error.
-+ *
-+ * @param baseTypeLocalName
-+ * The local name of the base type.
-+ *
-+ * @return
-+ * A non-null valid datatype object.
-+ */
-+ DatatypeBuilder createDatatypeBuilder( String baseTypeLocalName )
-+ throws DatatypeException;
-+
-+ /**
-+ * Gets or creates a pre-defined type.
-+ *
-+ * This is just a short-cut of
-+ * <code>createDatatypeBuilder(typeLocalName).createDatatype();</code>
-+ *
-+ * The callee should throw a DatatypeException in case of an error.
-+ *
-+ * @return
-+ * A non-null valid datatype object.
-+ */
-+ Datatype createDatatype( String typeLocalName ) throws DatatypeException;
-+}
-Index: gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/DatatypeStreamingValidator.java
-===================================================================
---- gcc-4.2.3.orig/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/DatatypeStreamingValidator.java 2006-03-10 14:25:35.000000000 +0100
-+++ gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/DatatypeStreamingValidator.java 2008-05-21 13:45:54.366287774 +0200
-@@ -1,46 +1,46 @@
--package org.relaxng.datatype;\r
--\r
--/**\r
-- * Datatype streaming validator.\r
-- * \r
-- * <p>\r
-- * The streaming validator is an optional feature that is useful for\r
-- * certain Datatypes. It allows the caller to incrementally provide\r
-- * the literal.\r
-- * \r
-- * @author <a href="mailto:jjc@jclark.com">James Clark</a>\r
-- * @author <a href="mailto:kohsuke.kawaguchi@sun.com">Kohsuke KAWAGUCHI</a>\r
-- */\r
--public interface DatatypeStreamingValidator {\r
-- \r
-- /**\r
-- * Passes an additional fragment of the literal.\r
-- * \r
-- * <p>\r
-- * The application can call this method several times, then call\r
-- * the isValid method (or the checkValid method) to check the validity\r
-- * of the accumulated characters.\r
-- */\r
-- void addCharacters( char[] buf, int start, int len );\r
-- \r
-- /**\r
-- * Tells if the accumulated literal is valid with respect to\r
-- * the underlying Datatype.\r
-- * \r
-- * @return\r
-- * True if it is valid. False if otherwise.\r
-- */\r
-- boolean isValid();\r
-- \r
-- /**\r
-- * Similar to the isValid method, but this method throws\r
-- * Exception (with possibly diagnostic information), instead of\r
-- * returning false.\r
-- * \r
-- * @exception DatatypeException\r
-- * If the callee supports the diagnosis and the accumulated\r
-- * literal is invalid, then this exception that possibly\r
-- * contains diagnosis information is thrown.\r
-- */\r
-- void checkValid() throws DatatypeException;\r
--}\r
-+package org.relaxng.datatype;
-+
-+/**
-+ * Datatype streaming validator.
-+ *
-+ * <p>
-+ * The streaming validator is an optional feature that is useful for
-+ * certain Datatypes. It allows the caller to incrementally provide
-+ * the literal.
-+ *
-+ * @author <a href="mailto:jjc@jclark.com">James Clark</a>
-+ * @author <a href="mailto:kohsuke.kawaguchi@sun.com">Kohsuke KAWAGUCHI</a>
-+ */
-+public interface DatatypeStreamingValidator {
-+
-+ /**
-+ * Passes an additional fragment of the literal.
-+ *
-+ * <p>
-+ * The application can call this method several times, then call
-+ * the isValid method (or the checkValid method) to check the validity
-+ * of the accumulated characters.
-+ */
-+ void addCharacters( char[] buf, int start, int len );
-+
-+ /**
-+ * Tells if the accumulated literal is valid with respect to
-+ * the underlying Datatype.
-+ *
-+ * @return
-+ * True if it is valid. False if otherwise.
-+ */
-+ boolean isValid();
-+
-+ /**
-+ * Similar to the isValid method, but this method throws
-+ * Exception (with possibly diagnostic information), instead of
-+ * returning false.
-+ *
-+ * @exception DatatypeException
-+ * If the callee supports the diagnosis and the accumulated
-+ * literal is invalid, then this exception that possibly
-+ * contains diagnosis information is thrown.
-+ */
-+ void checkValid() throws DatatypeException;
-+}
-Index: gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/helpers/DatatypeLibraryLoader.java
-===================================================================
---- gcc-4.2.3.orig/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/helpers/DatatypeLibraryLoader.java 2006-03-10 14:25:35.000000000 +0100
-+++ gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/helpers/DatatypeLibraryLoader.java 2008-05-21 13:45:54.370287167 +0200
-@@ -1,262 +1,262 @@
--/**\r
-- * Copyright (c) 2001, Thai Open Source Software Center Ltd\r
-- * All rights reserved.\r
-- * \r
-- * Redistribution and use in source and binary forms, with or without\r
-- * modification, are permitted provided that the following conditions are\r
-- * met:\r
-- * \r
-- * Redistributions of source code must retain the above copyright\r
-- * notice, this list of conditions and the following disclaimer.\r
-- * \r
-- * Redistributions in binary form must reproduce the above copyright\r
-- * notice, this list of conditions and the following disclaimer in\r
-- * the documentation and/or other materials provided with the\r
-- * distribution.\r
-- * \r
-- * Neither the name of the Thai Open Source Software Center Ltd nor\r
-- * the names of its contributors may be used to endorse or promote\r
-- * products derived from this software without specific prior written\r
-- * permission.\r
-- * \r
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r
-- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r
-- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
-- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR\r
-- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\r
-- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\r
-- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\r
-- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\r
-- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\r
-- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\r
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
-- */\r
--package org.relaxng.datatype.helpers;\r
--\r
--import org.relaxng.datatype.DatatypeLibraryFactory;\r
--import org.relaxng.datatype.DatatypeLibrary;\r
--import java.util.Enumeration;\r
--import java.util.NoSuchElementException;\r
--import java.util.Vector;\r
--import java.io.Reader;\r
--import java.io.InputStream;\r
--import java.io.InputStreamReader;\r
--import java.io.BufferedReader;\r
--import java.io.IOException;\r
--import java.io.UnsupportedEncodingException;\r
--import java.net.URL;\r
--\r
--/**\r
-- * Discovers the datatype library implementation from the classpath.\r
-- * \r
-- * <p>\r
-- * The call of the createDatatypeLibrary method finds an implementation\r
-- * from a given datatype library URI at run-time.\r
-- */\r
--public class DatatypeLibraryLoader implements DatatypeLibraryFactory {\r
-- private final Service service = new Service(DatatypeLibraryFactory.class);\r
--\r
-- public DatatypeLibrary createDatatypeLibrary(String uri) {\r
-- for (Enumeration e = service.getProviders();\r
-- e.hasMoreElements();) {\r
-- DatatypeLibraryFactory factory\r
-- = (DatatypeLibraryFactory)e.nextElement();\r
-- DatatypeLibrary library = factory.createDatatypeLibrary(uri);\r
-- if (library != null)\r
-- return library;\r
-- }\r
-- return null;\r
-- }\r
--\r
-- private static class Service {\r
-- private final Class serviceClass;\r
-- private final Enumeration configFiles;\r
-- private Enumeration classNames = null;\r
-- private final Vector providers = new Vector();\r
-- private Loader loader;\r
--\r
-- private class ProviderEnumeration implements Enumeration {\r
-- private int nextIndex = 0;\r
--\r
-- public boolean hasMoreElements() {\r
-- return nextIndex < providers.size() || moreProviders();\r
-- }\r
--\r
-- public Object nextElement() {\r
-- try {\r
-- return providers.elementAt(nextIndex++);\r
-- }\r
-- catch (ArrayIndexOutOfBoundsException e) {\r
-- throw new NoSuchElementException();\r
-- }\r
-- }\r
-- }\r
--\r
-- private static class Singleton implements Enumeration {\r
-- private Object obj;\r
-- private Singleton(Object obj) {\r
-- this.obj = obj;\r
-- }\r
--\r
-- public boolean hasMoreElements() {\r
-- return obj != null;\r
-- }\r
--\r
-- public Object nextElement() {\r
-- if (obj == null)\r
-- throw new NoSuchElementException();\r
-- Object tem = obj;\r
-- obj = null;\r
-- return tem;\r
-- }\r
-- }\r
--\r
-- // JDK 1.1\r
-- private static class Loader {\r
-- Enumeration getResources(String resName) {\r
-- ClassLoader cl = Loader.class.getClassLoader();\r
-- URL url;\r
-- if (cl == null)\r
-- url = ClassLoader.getSystemResource(resName);\r
-- else\r
-- url = cl.getResource(resName);\r
-- return new Singleton(url);\r
-- }\r
--\r
-- Class loadClass(String name) throws ClassNotFoundException {\r
-- return Class.forName(name);\r
-- }\r
-- }\r
--\r
-- // JDK 1.2+\r
-- private static class Loader2 extends Loader {\r
-- private ClassLoader cl;\r
--\r
-- Loader2() {\r
-- cl = Loader2.class.getClassLoader();\r
-- // If the thread context class loader has the class loader\r
-- // of this class as an ancestor, use the thread context class\r
-- // loader. Otherwise, the thread context class loader\r
-- // probably hasn't been set up properly, so don't use it.\r
-- ClassLoader clt = Thread.currentThread().getContextClassLoader();\r
-- for (ClassLoader tem = clt; tem != null; tem = tem.getParent())\r
-- if (tem == cl) {\r
-- cl = clt;\r
-- break;\r
-- }\r
-- }\r
--\r
-- Enumeration getResources(String resName) {\r
-- try {\r
-- return cl.getResources(resName);\r
-- }\r
-- catch (IOException e) {\r
-- return new Singleton(null);\r
-- }\r
-- }\r
--\r
-- Class loadClass(String name) throws ClassNotFoundException {\r
-- return Class.forName(name, true, cl);\r
-- }\r
-- }\r
--\r
-- public Service(Class cls) {\r
-- try {\r
-- loader = new Loader2();\r
-- }\r
-- catch (NoSuchMethodError e) {\r
-- loader = new Loader();\r
-- }\r
-- serviceClass = cls;\r
-- String resName = "META-INF/services/" + serviceClass.getName();\r
-- configFiles = loader.getResources(resName);\r
-- }\r
--\r
-- public Enumeration getProviders() {\r
-- return new ProviderEnumeration();\r
-- }\r
--\r
-- synchronized private boolean moreProviders() {\r
-- for (;;) {\r
-- while (classNames == null) {\r
-- if (!configFiles.hasMoreElements())\r
-- return false;\r
-- classNames = parseConfigFile((URL)configFiles.nextElement());\r
-- }\r
-- while (classNames.hasMoreElements()) {\r
-- String className = (String)classNames.nextElement();\r
-- try {\r
-- Class cls = loader.loadClass(className);\r
-- Object obj = cls.newInstance();\r
-- if (serviceClass.isInstance(obj)) {\r
-- providers.addElement(obj);\r
-- return true;\r
-- }\r
-- }\r
-- catch (ClassNotFoundException e) { }\r
-- catch (InstantiationException e) { }\r
-- catch (IllegalAccessException e) { }\r
-- catch (LinkageError e) { }\r
-- }\r
-- classNames = null;\r
-- }\r
-- }\r
--\r
-- private static final int START = 0;\r
-- private static final int IN_NAME = 1;\r
-- private static final int IN_COMMENT = 2;\r
--\r
-- private static Enumeration parseConfigFile(URL url) {\r
-- try {\r
-- InputStream in = url.openStream();\r
-- Reader r;\r
-- try {\r
-- r = new InputStreamReader(in, "UTF-8");\r
-- }\r
-- catch (UnsupportedEncodingException e) {\r
-- r = new InputStreamReader(in, "UTF8");\r
-- }\r
-- r = new BufferedReader(r);\r
-- Vector tokens = new Vector();\r
-- StringBuffer tokenBuf = new StringBuffer();\r
-- int state = START;\r
-- for (;;) {\r
-- int n = r.read();\r
-- if (n < 0)\r
-- break;\r
-- char c = (char)n;\r
-- switch (c) {\r
-- case '\r':\r
-- case '\n':\r
-- state = START;\r
-- break;\r
-- case ' ':\r
-- case '\t':\r
-- break;\r
-- case '#':\r
-- state = IN_COMMENT;\r
-- break;\r
-- default:\r
-- if (state != IN_COMMENT) {\r
-- state = IN_NAME;\r
-- tokenBuf.append(c);\r
-- }\r
-- break;\r
-- }\r
-- if (tokenBuf.length() != 0 && state != IN_NAME) {\r
-- tokens.addElement(tokenBuf.toString());\r
-- tokenBuf.setLength(0);\r
-- }\r
-- }\r
-- if (tokenBuf.length() != 0)\r
-- tokens.addElement(tokenBuf.toString());\r
-- return tokens.elements();\r
-- }\r
-- catch (IOException e) {\r
-- return null;\r
-- }\r
-- }\r
-- }\r
-- \r
--}\r
--\r
-+/**
-+ * Copyright (c) 2001, Thai Open Source Software Center Ltd
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are
-+ * met:
-+ *
-+ * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ *
-+ * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in
-+ * the documentation and/or other materials provided with the
-+ * distribution.
-+ *
-+ * Neither the name of the Thai Open Source Software Center Ltd nor
-+ * the names of its contributors may be used to endorse or promote
-+ * products derived from this software without specific prior written
-+ * permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
-+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+package org.relaxng.datatype.helpers;
-+
-+import org.relaxng.datatype.DatatypeLibraryFactory;
-+import org.relaxng.datatype.DatatypeLibrary;
-+import java.util.Enumeration;
-+import java.util.NoSuchElementException;
-+import java.util.Vector;
-+import java.io.Reader;
-+import java.io.InputStream;
-+import java.io.InputStreamReader;
-+import java.io.BufferedReader;
-+import java.io.IOException;
-+import java.io.UnsupportedEncodingException;
-+import java.net.URL;
-+
-+/**
-+ * Discovers the datatype library implementation from the classpath.
-+ *
-+ * <p>
-+ * The call of the createDatatypeLibrary method finds an implementation
-+ * from a given datatype library URI at run-time.
-+ */
-+public class DatatypeLibraryLoader implements DatatypeLibraryFactory {
-+ private final Service service = new Service(DatatypeLibraryFactory.class);
-+
-+ public DatatypeLibrary createDatatypeLibrary(String uri) {
-+ for (Enumeration e = service.getProviders();
-+ e.hasMoreElements();) {
-+ DatatypeLibraryFactory factory
-+ = (DatatypeLibraryFactory)e.nextElement();
-+ DatatypeLibrary library = factory.createDatatypeLibrary(uri);
-+ if (library != null)
-+ return library;
-+ }
-+ return null;
-+ }
-+
-+ private static class Service {
-+ private final Class serviceClass;
-+ private final Enumeration configFiles;
-+ private Enumeration classNames = null;
-+ private final Vector providers = new Vector();
-+ private Loader loader;
-+
-+ private class ProviderEnumeration implements Enumeration {
-+ private int nextIndex = 0;
-+
-+ public boolean hasMoreElements() {
-+ return nextIndex < providers.size() || moreProviders();
-+ }
-+
-+ public Object nextElement() {
-+ try {
-+ return providers.elementAt(nextIndex++);
-+ }
-+ catch (ArrayIndexOutOfBoundsException e) {
-+ throw new NoSuchElementException();
-+ }
-+ }
-+ }
-+
-+ private static class Singleton implements Enumeration {
-+ private Object obj;
-+ private Singleton(Object obj) {
-+ this.obj = obj;
-+ }
-+
-+ public boolean hasMoreElements() {
-+ return obj != null;
-+ }
-+
-+ public Object nextElement() {
-+ if (obj == null)
-+ throw new NoSuchElementException();
-+ Object tem = obj;
-+ obj = null;
-+ return tem;
-+ }
-+ }
-+
-+ // JDK 1.1
-+ private static class Loader {
-+ Enumeration getResources(String resName) {
-+ ClassLoader cl = Loader.class.getClassLoader();
-+ URL url;
-+ if (cl == null)
-+ url = ClassLoader.getSystemResource(resName);
-+ else
-+ url = cl.getResource(resName);
-+ return new Singleton(url);
-+ }
-+
-+ Class loadClass(String name) throws ClassNotFoundException {
-+ return Class.forName(name);
-+ }
-+ }
-+
-+ // JDK 1.2+
-+ private static class Loader2 extends Loader {
-+ private ClassLoader cl;
-+
-+ Loader2() {
-+ cl = Loader2.class.getClassLoader();
-+ // If the thread context class loader has the class loader
-+ // of this class as an ancestor, use the thread context class
-+ // loader. Otherwise, the thread context class loader
-+ // probably hasn't been set up properly, so don't use it.
-+ ClassLoader clt = Thread.currentThread().getContextClassLoader();
-+ for (ClassLoader tem = clt; tem != null; tem = tem.getParent())
-+ if (tem == cl) {
-+ cl = clt;
-+ break;
-+ }
-+ }
-+
-+ Enumeration getResources(String resName) {
-+ try {
-+ return cl.getResources(resName);
-+ }
-+ catch (IOException e) {
-+ return new Singleton(null);
-+ }
-+ }
-+
-+ Class loadClass(String name) throws ClassNotFoundException {
-+ return Class.forName(name, true, cl);
-+ }
-+ }
-+
-+ public Service(Class cls) {
-+ try {
-+ loader = new Loader2();
-+ }
-+ catch (NoSuchMethodError e) {
-+ loader = new Loader();
-+ }
-+ serviceClass = cls;
-+ String resName = "META-INF/services/" + serviceClass.getName();
-+ configFiles = loader.getResources(resName);
-+ }
-+
-+ public Enumeration getProviders() {
-+ return new ProviderEnumeration();
-+ }
-+
-+ synchronized private boolean moreProviders() {
-+ for (;;) {
-+ while (classNames == null) {
-+ if (!configFiles.hasMoreElements())
-+ return false;
-+ classNames = parseConfigFile((URL)configFiles.nextElement());
-+ }
-+ while (classNames.hasMoreElements()) {
-+ String className = (String)classNames.nextElement();
-+ try {
-+ Class cls = loader.loadClass(className);
-+ Object obj = cls.newInstance();
-+ if (serviceClass.isInstance(obj)) {
-+ providers.addElement(obj);
-+ return true;
-+ }
-+ }
-+ catch (ClassNotFoundException e) { }
-+ catch (InstantiationException e) { }
-+ catch (IllegalAccessException e) { }
-+ catch (LinkageError e) { }
-+ }
-+ classNames = null;
-+ }
-+ }
-+
-+ private static final int START = 0;
-+ private static final int IN_NAME = 1;
-+ private static final int IN_COMMENT = 2;
-+
-+ private static Enumeration parseConfigFile(URL url) {
-+ try {
-+ InputStream in = url.openStream();
-+ Reader r;
-+ try {
-+ r = new InputStreamReader(in, "UTF-8");
-+ }
-+ catch (UnsupportedEncodingException e) {
-+ r = new InputStreamReader(in, "UTF8");
-+ }
-+ r = new BufferedReader(r);
-+ Vector tokens = new Vector();
-+ StringBuffer tokenBuf = new StringBuffer();
-+ int state = START;
-+ for (;;) {
-+ int n = r.read();
-+ if (n < 0)
-+ break;
-+ char c = (char)n;
-+ switch (c) {
-+ case '\r':
-+ case '\n':
-+ state = START;
-+ break;
-+ case ' ':
-+ case '\t':
-+ break;
-+ case '#':
-+ state = IN_COMMENT;
-+ break;
-+ default:
-+ if (state != IN_COMMENT) {
-+ state = IN_NAME;
-+ tokenBuf.append(c);
-+ }
-+ break;
-+ }
-+ if (tokenBuf.length() != 0 && state != IN_NAME) {
-+ tokens.addElement(tokenBuf.toString());
-+ tokenBuf.setLength(0);
-+ }
-+ }
-+ if (tokenBuf.length() != 0)
-+ tokens.addElement(tokenBuf.toString());
-+ return tokens.elements();
-+ }
-+ catch (IOException e) {
-+ return null;
-+ }
-+ }
-+ }
-+
-+}
-+
-Index: gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/helpers/ParameterlessDatatypeBuilder.java
-===================================================================
---- gcc-4.2.3.orig/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/helpers/ParameterlessDatatypeBuilder.java 2006-03-10 14:25:35.000000000 +0100
-+++ gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/helpers/ParameterlessDatatypeBuilder.java 2008-05-21 13:45:54.370287167 +0200
-@@ -1,42 +1,42 @@
--package org.relaxng.datatype.helpers;\r
--\r
--import org.relaxng.datatype.*;\r
--\r
--/**\r
-- * Dummy implementation of {@link DatatypeBuilder}.\r
-- * \r
-- * This implementation can be used for Datatypes which have no parameters.\r
-- * Any attempt to add parameters will be rejected.\r
-- * \r
-- * <p>\r
-- * Typical usage would be:\r
-- * <PRE><XMP>\r
-- * class MyDatatypeLibrary implements DatatypeLibrary {\r
-- * ....\r
-- * DatatypeBuilder createDatatypeBuilder( String typeName ) {\r
-- * return new ParameterleessDatatypeBuilder(createDatatype(typeName));\r
-- * }\r
-- * ....\r
-- * }\r
-- * </XMP></PRE>\r
-- * \r
-- * @author <a href="mailto:kohsuke.kawaguchi@sun.com">Kohsuke KAWAGUCHI</a>\r
-- */\r
--public final class ParameterlessDatatypeBuilder implements DatatypeBuilder {\r
--\r
-- /** This type object is returned for the derive method. */\r
-- private final Datatype baseType;\r
-- \r
-- public ParameterlessDatatypeBuilder( Datatype baseType ) {\r
-- this.baseType = baseType;\r
-- }\r
-- \r
-- public void addParameter( String name, String strValue, ValidationContext context )\r
-- throws DatatypeException {\r
-- throw new DatatypeException();\r
-- }\r
-- \r
-- public Datatype createDatatype() throws DatatypeException {\r
-- return baseType;\r
-- }\r
--}\r
-+package org.relaxng.datatype.helpers;
-+
-+import org.relaxng.datatype.*;
-+
-+/**
-+ * Dummy implementation of {@link DatatypeBuilder}.
-+ *
-+ * This implementation can be used for Datatypes which have no parameters.
-+ * Any attempt to add parameters will be rejected.
-+ *
-+ * <p>
-+ * Typical usage would be:
-+ * <PRE><XMP>
-+ * class MyDatatypeLibrary implements DatatypeLibrary {
-+ * ....
-+ * DatatypeBuilder createDatatypeBuilder( String typeName ) {
-+ * return new ParameterleessDatatypeBuilder(createDatatype(typeName));
-+ * }
-+ * ....
-+ * }
-+ * </XMP></PRE>
-+ *
-+ * @author <a href="mailto:kohsuke.kawaguchi@sun.com">Kohsuke KAWAGUCHI</a>
-+ */
-+public final class ParameterlessDatatypeBuilder implements DatatypeBuilder {
-+
-+ /** This type object is returned for the derive method. */
-+ private final Datatype baseType;
-+
-+ public ParameterlessDatatypeBuilder( Datatype baseType ) {
-+ this.baseType = baseType;
-+ }
-+
-+ public void addParameter( String name, String strValue, ValidationContext context )
-+ throws DatatypeException {
-+ throw new DatatypeException();
-+ }
-+
-+ public Datatype createDatatype() throws DatatypeException {
-+ return baseType;
-+ }
-+}
-Index: gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/helpers/StreamingValidatorImpl.java
-===================================================================
---- gcc-4.2.3.orig/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/helpers/StreamingValidatorImpl.java 2006-03-10 14:25:35.000000000 +0100
-+++ gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/helpers/StreamingValidatorImpl.java 2008-05-21 13:45:54.370287167 +0200
-@@ -1,55 +1,55 @@
--package org.relaxng.datatype.helpers;\r
--\r
--import org.relaxng.datatype.*;\r
--\r
--/**\r
-- * Dummy implementation of {@link DatatypeStreamingValidator}.\r
-- * \r
-- * <p>\r
-- * This implementation can be used as a quick hack when the performance\r
-- * of streaming validation is not important. And this implementation\r
-- * also shows you how to implement the DatatypeStreamingValidator interface.\r
-- * \r
-- * <p>\r
-- * Typical usage would be:\r
-- * <PRE><XMP>\r
-- * class MyDatatype implements Datatype {\r
-- * ....\r
-- * public DatatypeStreamingValidator createStreamingValidator( ValidationContext context ) {\r
-- * return new StreamingValidatorImpl(this,context);\r
-- * }\r
-- * ....\r
-- * }\r
-- * </XMP></PRE>\r
-- * \r
-- * @author <a href="mailto:kohsuke.kawaguchi@sun.com">Kohsuke KAWAGUCHI</a>\r
-- */\r
--public final class StreamingValidatorImpl implements DatatypeStreamingValidator {\r
-- \r
-- /** This buffer accumulates characters. */\r
-- private final StringBuffer buffer = new StringBuffer();\r
-- \r
-- /** Datatype obejct that creates this streaming validator. */\r
-- private final Datatype baseType;\r
-- \r
-- /** The current context. */\r
-- private final ValidationContext context;\r
-- \r
-- public void addCharacters( char[] buf, int start, int len ) {\r
-- // append characters to the current buffer.\r
-- buffer.append(buf,start,len);\r
-- }\r
-- \r
-- public boolean isValid() {\r
-- return baseType.isValid(buffer.toString(),context);\r
-- }\r
-- \r
-- public void checkValid() throws DatatypeException {\r
-- baseType.checkValid(buffer.toString(),context);\r
-- }\r
-- \r
-- public StreamingValidatorImpl( Datatype baseType, ValidationContext context ) {\r
-- this.baseType = baseType;\r
-- this.context = context;\r
-- }\r
--}\r
-+package org.relaxng.datatype.helpers;
-+
-+import org.relaxng.datatype.*;
-+
-+/**
-+ * Dummy implementation of {@link DatatypeStreamingValidator}.
-+ *
-+ * <p>
-+ * This implementation can be used as a quick hack when the performance
-+ * of streaming validation is not important. And this implementation
-+ * also shows you how to implement the DatatypeStreamingValidator interface.
-+ *
-+ * <p>
-+ * Typical usage would be:
-+ * <PRE><XMP>
-+ * class MyDatatype implements Datatype {
-+ * ....
-+ * public DatatypeStreamingValidator createStreamingValidator( ValidationContext context ) {
-+ * return new StreamingValidatorImpl(this,context);
-+ * }
-+ * ....
-+ * }
-+ * </XMP></PRE>
-+ *
-+ * @author <a href="mailto:kohsuke.kawaguchi@sun.com">Kohsuke KAWAGUCHI</a>
-+ */
-+public final class StreamingValidatorImpl implements DatatypeStreamingValidator {
-+
-+ /** This buffer accumulates characters. */
-+ private final StringBuffer buffer = new StringBuffer();
-+
-+ /** Datatype obejct that creates this streaming validator. */
-+ private final Datatype baseType;
-+
-+ /** The current context. */
-+ private final ValidationContext context;
-+
-+ public void addCharacters( char[] buf, int start, int len ) {
-+ // append characters to the current buffer.
-+ buffer.append(buf,start,len);
-+ }
-+
-+ public boolean isValid() {
-+ return baseType.isValid(buffer.toString(),context);
-+ }
-+
-+ public void checkValid() throws DatatypeException {
-+ baseType.checkValid(buffer.toString(),context);
-+ }
-+
-+ public StreamingValidatorImpl( Datatype baseType, ValidationContext context ) {
-+ this.baseType = baseType;
-+ this.context = context;
-+ }
-+}
-Index: gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/ValidationContext.java
-===================================================================
---- gcc-4.2.3.orig/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/ValidationContext.java 2006-03-10 14:25:35.000000000 +0100
-+++ gcc-4.2.3/libjava/classpath/external/relaxngDatatype/org/relaxng/datatype/ValidationContext.java 2008-05-21 13:45:54.370287167 +0200
-@@ -1,66 +1,66 @@
--package org.relaxng.datatype;\r
--\r
--/**\r
-- * An interface that must be implemented by caller to\r
-- * provide context information that is necessary to \r
-- * perform validation of some Datatypes.\r
-- * \r
-- * @author <a href="mailto:jjc@jclark.com">James Clark</a>\r
-- * @author <a href="mailto:kohsuke.kawaguchi@sun.com">Kohsuke KAWAGUCHI</a>\r
-- */\r
--public interface ValidationContext {\r
-- \r
-- /**\r
-- * Resolves a namespace prefix to the corresponding namespace URI.\r
-- * \r
-- * This method is used for validating the QName type, for example.\r
-- *\r
-- * <p>\r
-- * If the prefix is "" (empty string), it indicates\r
-- * an unprefixed value. The callee\r
-- * should resolve it as for an unprefixed\r
-- * element, rather than for an unprefixed attribute.\r
-- * \r
-- * <p>\r
-- * If the prefix is "xml", then the callee must resolve\r
-- * this prefix into "http://www.w3.org/XML/1998/namespace",\r
-- * as defined in the XML Namespaces Recommendation.\r
-- * \r
-- * @return\r
-- * namespace URI of this prefix.\r
-- * If the specified prefix is not declared,\r
-- * the implementation must return null.\r
-- */\r
-- String resolveNamespacePrefix( String prefix );\r
--\r
-- /**\r
-- * Returns the base URI of the context. The null string may be returned\r
-- * if no base URI is known.\r
-- */\r
-- String getBaseUri();\r
--\r
-- /**\r
-- * Checks if an unparsed entity is declared with the\r
-- * specified name.\r
-- * \r
-- * @return\r
-- * true\r
-- * if the DTD has an unparsed entity declaration for\r
-- * the specified name.\r
-- * false\r
-- * otherwise.\r
-- */\r
-- boolean isUnparsedEntity( String entityName );\r
--\r
-- /**\r
-- * Checks if a notation is declared with the\r
-- * specified name.\r
-- * \r
-- * @return\r
-- * true\r
-- * if the DTD has a notation declaration for the specified name.\r
-- * false\r
-- * otherwise.\r
-- */\r
-- boolean isNotation( String notationName );\r
--}\r
-+package org.relaxng.datatype;
-+
-+/**
-+ * An interface that must be implemented by caller to
-+ * provide context information that is necessary to
-+ * perform validation of some Datatypes.
-+ *
-+ * @author <a href="mailto:jjc@jclark.com">James Clark</a>
-+ * @author <a href="mailto:kohsuke.kawaguchi@sun.com">Kohsuke KAWAGUCHI</a>
-+ */
-+public interface ValidationContext {
-+
-+ /**
-+ * Resolves a namespace prefix to the corresponding namespace URI.
-+ *
-+ * This method is used for validating the QName type, for example.
-+ *
-+ * <p>
-+ * If the prefix is "" (empty string), it indicates
-+ * an unprefixed value. The callee
-+ * should resolve it as for an unprefixed
-+ * element, rather than for an unprefixed attribute.
-+ *
-+ * <p>
-+ * If the prefix is "xml", then the callee must resolve
-+ * this prefix into "http://www.w3.org/XML/1998/namespace",
-+ * as defined in the XML Namespaces Recommendation.
-+ *
-+ * @return
-+ * namespace URI of this prefix.
-+ * If the specified prefix is not declared,
-+ * the implementation must return null.
-+ */
-+ String resolveNamespacePrefix( String prefix );
-+
-+ /**
-+ * Returns the base URI of the context. The null string may be returned
-+ * if no base URI is known.
-+ */
-+ String getBaseUri();
-+
-+ /**
-+ * Checks if an unparsed entity is declared with the
-+ * specified name.
-+ *
-+ * @return
-+ * true
-+ * if the DTD has an unparsed entity declaration for
-+ * the specified name.
-+ * false
-+ * otherwise.
-+ */
-+ boolean isUnparsedEntity( String entityName );
-+
-+ /**
-+ * Checks if a notation is declared with the
-+ * specified name.
-+ *
-+ * @return
-+ * true
-+ * if the DTD has a notation declaration for the specified name.
-+ * false
-+ * otherwise.
-+ */
-+ boolean isNotation( String notationName );
-+}
-Index: gcc-4.2.3/libjava/classpath/external/relaxngDatatype/README.txt
-===================================================================
---- gcc-4.2.3.orig/libjava/classpath/external/relaxngDatatype/README.txt 2006-03-10 14:25:35.000000000 +0100
-+++ gcc-4.2.3/libjava/classpath/external/relaxngDatatype/README.txt 2008-05-21 13:45:54.370287167 +0200
-@@ -1,54 +1,54 @@
--======================================================================\r
-- README FILE FOR DATATYPE INTERFACES FOR RELAX NG\r
--======================================================================\r
--\r
--\r
--\r
--RELAX NG supports multiple datatype vocabularies. To achive this, an\r
--interface between datatype vocabularies and schema processors is \r
--necessary. This interface is intended to be a standard Java interface\r
--for this purpose.\r
--\r
--\r
------------------------------------------------------------------------\r
--LICENSE\r
------------------------------------------------------------------------\r
--\r
--See copying.txt.\r
--\r
--Note: this license is the BSD license.\r
--\r
--\r
--\r
------------------------------------------------------------------------\r
--FOR DEVELOPER\r
------------------------------------------------------------------------\r
--\r
--If you are planning to implement a datatype library, A sample datatype\r
--library implementation by James Clark is available at [1], which\r
--comes with documentation and source code.\r
--\r
--If you are planning to implement a schema processor, then don't forget\r
--to check out org.relaxng.datatype.helpers.DatatypeLibraryLoader, as \r
--this allows you to dynamically locate datatype implementations.\r
--\r
--\r
------------------------------------------------------------------------\r
--LINKS\r
------------------------------------------------------------------------\r
--\r
--* OASIS RELAX NG TC\r
-- http://www.oasis-open.org/committees/relax-ng/\r
--* RELAX home page\r
-- http://www.xml.gr.jp/relax/\r
--\r
--\r
------------------------------------------------------------------------\r
--REFERENCES\r
------------------------------------------------------------------------\r
--[1] Sample datatype library implementation by James Clark\r
-- http://www.thaiopensource.com/relaxng/datatype-sample.zip\r
--\r
--Document written by Kohsuke Kawaguchi (kohsuke.kawaguchi@sun.com)\r
--======================================================================\r
--END OF README\r
-+======================================================================
-+ README FILE FOR DATATYPE INTERFACES FOR RELAX NG
-+======================================================================
-+
-+
-+
-+RELAX NG supports multiple datatype vocabularies. To achive this, an
-+interface between datatype vocabularies and schema processors is
-+necessary. This interface is intended to be a standard Java interface
-+for this purpose.
-+
-+
-+----------------------------------------------------------------------
-+LICENSE
-+----------------------------------------------------------------------
-+
-+See copying.txt.
-+
-+Note: this license is the BSD license.
-+
-+
-+
-+----------------------------------------------------------------------
-+FOR DEVELOPER
-+----------------------------------------------------------------------
-+
-+If you are planning to implement a datatype library, A sample datatype
-+library implementation by James Clark is available at [1], which
-+comes with documentation and source code.
-+
-+If you are planning to implement a schema processor, then don't forget
-+to check out org.relaxng.datatype.helpers.DatatypeLibraryLoader, as
-+this allows you to dynamically locate datatype implementations.
-+
-+
-+----------------------------------------------------------------------
-+LINKS
-+----------------------------------------------------------------------
-+
-+* OASIS RELAX NG TC
-+ http://www.oasis-open.org/committees/relax-ng/
-+* RELAX home page
-+ http://www.xml.gr.jp/relax/
-+
-+
-+----------------------------------------------------------------------
-+REFERENCES
-+----------------------------------------------------------------------
-+[1] Sample datatype library implementation by James Clark
-+ http://www.thaiopensource.com/relaxng/datatype-sample.zip
-+
-+Document written by Kohsuke Kawaguchi (kohsuke.kawaguchi@sun.com)
-+======================================================================
-+END OF README
-Index: gcc-4.2.3/libstdc++-v3/config/os/gnu-linux/ctype_base.h
-===================================================================
---- gcc-4.2.3.orig/libstdc++-v3/config/os/gnu-linux/ctype_base.h 2006-12-01 13:56:23.000000000 +0100
-+++ gcc-4.2.3/libstdc++-v3/config/os/gnu-linux/ctype_base.h 2008-05-21 13:45:54.370287167 +0200
-@@ -31,6 +31,8 @@
- //
- // ISO C++ 14882: 22.1 Locales
- //
-+#include <features.h>
-+#include <ctype.h>
-
- /** @file ctype_base.h
- * This is an internal header file, included by other library headers.
-@@ -45,8 +47,12 @@
- struct ctype_base
- {
- // Non-standard typedefs.
-- typedef const int* __to_type;
--
-+#ifdef __UCLIBC__
-+ typedef const __ctype_touplow_t* __to_type;
-+#else
-+ typedef const int* __to_type;
-+#endif
-+
- // NB: Offsets into ctype<char>::_M_table force a particular size
- // on the mask type. Because of this, we don't use an enum.
- typedef unsigned short mask;
-Index: gcc-4.2.3/libstdc++-v3/include/Makefile.in
-===================================================================
---- gcc-4.2.3.orig/libstdc++-v3/include/Makefile.in 2007-07-05 13:46:00.000000000 +0200
-+++ gcc-4.2.3/libstdc++-v3/include/Makefile.in 2008-05-21 13:45:54.374287956 +0200
-@@ -36,6 +36,7 @@
+--- a/libstdc++-v3/include/Makefile.in
++++ b/libstdc++-v3/include/Makefile.in
+@@ -36,6 +36,7 @@ POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
target_triplet = @target@
DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
$(top_srcdir)/fragment.am
subdir = include
-Index: gcc-4.2.3/libstdc++-v3/libmath/Makefile.in
-===================================================================
---- gcc-4.2.3.orig/libstdc++-v3/libmath/Makefile.in 2006-10-16 21:08:22.000000000 +0200
-+++ gcc-4.2.3/libstdc++-v3/libmath/Makefile.in 2008-05-21 13:45:54.374287956 +0200
-@@ -37,6 +37,7 @@
+--- a/libstdc++-v3/libmath/Makefile.in
++++ b/libstdc++-v3/libmath/Makefile.in
+@@ -37,6 +37,7 @@ POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
target_triplet = @target@
subdir = libmath
DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-Index: gcc-4.2.3/libstdc++-v3/libsupc++/Makefile.in
-===================================================================
---- gcc-4.2.3.orig/libstdc++-v3/libsupc++/Makefile.in 2006-10-16 21:08:22.000000000 +0200
-+++ gcc-4.2.3/libstdc++-v3/libsupc++/Makefile.in 2008-05-21 13:45:54.374287956 +0200
-@@ -38,6 +38,7 @@
+--- a/libstdc++-v3/libsupc++/Makefile.in
++++ b/libstdc++-v3/libsupc++/Makefile.in
+@@ -38,6 +38,7 @@ POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
target_triplet = @target@
DIST_COMMON = $(glibcxxinstall_HEADERS) $(srcdir)/Makefile.am \
$(srcdir)/Makefile.in $(top_srcdir)/fragment.am
subdir = libsupc++
-Index: gcc-4.2.3/libstdc++-v3/Makefile.in
-===================================================================
---- gcc-4.2.3.orig/libstdc++-v3/Makefile.in 2006-10-16 21:08:22.000000000 +0200
-+++ gcc-4.2.3/libstdc++-v3/Makefile.in 2008-05-21 13:45:54.374287956 +0200
-@@ -36,6 +36,7 @@
+--- a/libstdc++-v3/Makefile.in
++++ b/libstdc++-v3/Makefile.in
+@@ -36,6 +36,7 @@ POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
target_triplet = @target@
DIST_COMMON = README $(am__configure_deps) $(srcdir)/../config.guess \
$(srcdir)/../config.sub $(srcdir)/../install-sh \
$(srcdir)/../ltmain.sh $(srcdir)/../missing \
-Index: gcc-4.2.3/libstdc++-v3/po/Makefile.in
-===================================================================
---- gcc-4.2.3.orig/libstdc++-v3/po/Makefile.in 2006-10-16 21:08:22.000000000 +0200
-+++ gcc-4.2.3/libstdc++-v3/po/Makefile.in 2008-05-21 13:45:54.378287906 +0200
-@@ -36,6 +36,7 @@
+--- a/libstdc++-v3/po/Makefile.in
++++ b/libstdc++-v3/po/Makefile.in
+@@ -36,6 +36,7 @@ POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
target_triplet = @target@
DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
$(top_srcdir)/fragment.am
subdir = po
-Index: gcc-4.2.3/libstdc++-v3/src/Makefile.in
-===================================================================
---- gcc-4.2.3.orig/libstdc++-v3/src/Makefile.in 2008-05-21 13:45:47.465287535 +0200
-+++ gcc-4.2.3/libstdc++-v3/src/Makefile.in 2008-05-21 13:45:54.378287906 +0200
-@@ -36,6 +36,7 @@
+--- a/libstdc++-v3/src/Makefile.in
++++ b/libstdc++-v3/src/Makefile.in
+@@ -36,6 +36,7 @@ POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
target_triplet = @target@
DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
$(top_srcdir)/fragment.am
subdir = src
-Index: gcc-4.2.3/libstdc++-v3/acinclude.m4
-===================================================================
---- gcc-4.2.3.orig/libstdc++-v3/acinclude.m4 2008-05-21 13:45:43.925289703 +0200
-+++ gcc-4.2.3/libstdc++-v3/acinclude.m4 2008-05-21 13:45:54.378287906 +0200
-@@ -1392,8 +1392,8 @@
- #endif
- int main()
- {
-- const char __one[] = "Äuglein Augmen";
-- const char __two[] = "Äuglein";
-+ const char __one[] = "Äuglein Augmen";
-+ const char __two[] = "Äuglein";
- int i;
- int j;
- __locale_t loc;
+--- a/gcc/configure.ac
++++ b/gcc/configure.ac
+@@ -2971,7 +2971,7 @@ esac
+ case "$target" in
+ i?86*-*-* | mips*-*-* | alpha*-*-* | powerpc*-*-* | sparc*-*-* | m68*-*-* \
+ | x86_64*-*-* | hppa*-*-* | arm*-*-* | strongarm*-*-* | xscale*-*-* \
+- | xstormy16*-*-* | cris-*-* | xtensa-*-* | bfin-*-* | score*-*-*)
++ | xstormy16*-*-* | cris-*-* | xtensa-*-* | bfin-*-* | score*-*-* | avr32-*-*)
+ insn="nop"
+ ;;
+ ia64*-*-* | s390*-*-*)
+--- a/gcc/configure
++++ b/gcc/configure
+@@ -15610,7 +15610,7 @@ esac
+ case "$target" in
+ i?86*-*-* | mips*-*-* | alpha*-*-* | powerpc*-*-* | sparc*-*-* | m68*-*-* \
+ | x86_64*-*-* | hppa*-*-* | arm*-*-* | strongarm*-*-* | xscale*-*-* \
+- | xstormy16*-*-* | cris-*-* | xtensa-*-* | bfin-*-* | score*-*-*)
++ | xstormy16*-*-* | cris-*-* | xtensa-*-* | bfin-*-* | score*-*-* | avr32-*-*)
+ insn="nop"
+ ;;
+ ia64*-*-* | s390*-*-*)