| --- a/configure.in |
| +++ b/configure.in |
| @@ -503,6 +503,9 @@ case "${target}" in |
| arm-*-riscix*) |
| noconfigdirs="$noconfigdirs ld target-libgloss ${libgcj}" |
| ;; |
| + avr32-*-*) |
| + noconfigdirs="$noconfigdirs target-libiberty target-libmudflap target-libffi ${libgcj}" |
| + ;; |
| avr-*-*) |
| noconfigdirs="$noconfigdirs target-libiberty target-libstdc++-v3 ${libgcj}" |
| ;; |
| --- a/gcc/builtins.c |
| +++ b/gcc/builtins.c |
| @@ -9223,7 +9223,7 @@ validate_arglist (tree arglist, ...) |
| |
| do |
| { |
| - code = va_arg (ap, enum tree_code); |
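| + /* Enum arguments are promoted to int when passed through `...', |
| + so read the value back as an int. */ |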
| + code = va_arg (ap, int); |
| switch (code) |
| { |
| case 0: |
| --- a/gcc/calls.c |
| +++ b/gcc/calls.c |
| @@ -3447,7 +3447,7 @@ emit_library_call_value_1 (int retval, r |
| for (; count < nargs; count++) |
| { |
| rtx val = va_arg (p, rtx); |
| - enum machine_mode mode = va_arg (p, enum machine_mode); |
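| + /* The machine_mode enum is promoted to int when passed through `...'. */ |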
| + enum machine_mode mode = va_arg (p, int); |
| |
| /* We cannot convert the arg value to the mode the library wants here; |
| must do it earlier where we know the signedness of the arg. */ |
| --- a/gcc/c-incpath.c |
| +++ b/gcc/c-incpath.c |
| @@ -347,6 +347,18 @@ add_path (char *path, int chain, int cxx |
| char* c; |
| for (c = path; *c; c++) |
| if (*c == '\\') *c = '/'; |
| + /* Remove unnecessary trailing slashes. On some versions of MS |
| + Windows, trailing _forward_ slashes cause no problems for stat(). |
| + On newer versions, stat() does not recognise a directory that ends |
| + in a '\\' or '/', unless it is a drive root dir, such as "c:/", |
| + where it is obligatory. */ |
| + int pathlen = strlen (path); |
| + char* end = path + pathlen - 1; |
| + /* Preserve the leading '/' or leading "c:/". */ |
| + char* start = path + (pathlen > 2 && path[1] == ':' ? 3 : 1); |
| + |
| + for (; end > start && IS_DIR_SEPARATOR (*end); end--) |
| + *end = 0; |
| #endif |
| |
| p = XNEW (cpp_dir); |
| --- /dev/null |
| +++ b/gcc/config/avr32/avr32.c |
| @@ -0,0 +1,7915 @@ |
| +/* |
| + Target hooks and helper functions for AVR32. |
| + Copyright 2003-2006 Atmel Corporation. |
| + |
| + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| + Initial porting by Anders Ødland. |
| + |
| + This file is part of GCC. |
| + |
| + This program is free software; you can redistribute it and/or modify |
| + it under the terms of the GNU General Public License as published by |
| + the Free Software Foundation; either version 2 of the License, or |
| + (at your option) any later version. |
| + |
| + This program is distributed in the hope that it will be useful, |
| + but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + GNU General Public License for more details. |
| + |
| + You should have received a copy of the GNU General Public License |
| + along with this program; if not, write to the Free Software |
| + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ |
| + |
| +#include "config.h" |
| +#include "system.h" |
| +#include "coretypes.h" |
| +#include "tm.h" |
| +#include "rtl.h" |
| +#include "tree.h" |
| +#include "obstack.h" |
| +#include "regs.h" |
| +#include "hard-reg-set.h" |
| +#include "real.h" |
| +#include "insn-config.h" |
| +#include "conditions.h" |
| +#include "output.h" |
| +#include "insn-attr.h" |
| +#include "flags.h" |
| +#include "reload.h" |
| +#include "function.h" |
| +#include "expr.h" |
| +#include "optabs.h" |
| +#include "toplev.h" |
| +#include "recog.h" |
| +#include "ggc.h" |
| +#include "except.h" |
| +#include "c-pragma.h" |
| +#include "integrate.h" |
| +#include "tm_p.h" |
| +#include "langhooks.h" |
| + |
| +#include "target.h" |
| +#include "target-def.h" |
| + |
| +#include <ctype.h> |
| + |
| +/* Forward definitions of types. */ |
| +typedef struct minipool_node Mnode; |
| +typedef struct minipool_fixup Mfix; |
| + |
| +/* Obstack for minipool constant handling. */ |
| +static struct obstack minipool_obstack; |
| +static char *minipool_startobj; |
| +static rtx minipool_vector_label; |
| + |
| +/* True if we are currently building a constant table. */ |
| +int making_const_table; |
| + |
| +/* Some forward function declarations */ |
| +static unsigned long avr32_isr_value (tree); |
| +static unsigned long avr32_compute_func_type (void); |
| +static tree avr32_handle_isr_attribute (tree *, tree, tree, int, bool *); |
| +static tree avr32_handle_acall_attribute (tree *, tree, tree, int, bool *); |
| +static tree avr32_handle_fndecl_attribute (tree * node, tree name, tree args, |
| + int flags, bool * no_add_attrs); |
| +static void avr32_reorg (void); |
| +bool avr32_return_in_msb (tree type); |
| +bool avr32_vector_mode_supported (enum machine_mode mode); |
| +static void avr32_init_libfuncs (void); |
| + |
| + |
| +static void |
| +avr32_add_gc_roots (void) |
| +{ |
| + gcc_obstack_init (&minipool_obstack); |
| + minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0); |
| +} |
| + |
| + |
| +/* List of all known AVR32 parts */ |
| +static const struct part_type_s avr32_part_types[] = { |
| + /* name, part_type, architecture type, macro */ |
| + {"none", PART_TYPE_AVR32_NONE, ARCH_TYPE_AVR32_AP, "__AVR32__"}, |
| + {"ap7000", PART_TYPE_AVR32_AP7000, ARCH_TYPE_AVR32_AP, "__AVR32_AP7000__"}, |
| + {"ap7001", PART_TYPE_AVR32_AP7001, ARCH_TYPE_AVR32_AP, "__AVR32_AP7001__"}, |
| + {"ap7002", PART_TYPE_AVR32_AP7002, ARCH_TYPE_AVR32_AP, "__AVR32_AP7002__"}, |
| + {"ap7200", PART_TYPE_AVR32_AP7200, ARCH_TYPE_AVR32_AP, "__AVR32_AP7200__"}, |
| + {"uc3a0128", PART_TYPE_AVR32_UC3A0128, ARCH_TYPE_AVR32_UCR2, |
| + "__AVR32_UC3A0128__"}, |
| + {"uc3a0256", PART_TYPE_AVR32_UC3A0256, ARCH_TYPE_AVR32_UCR2, |
| + "__AVR32_UC3A0256__"}, |
| + {"uc3a0512", PART_TYPE_AVR32_UC3A0512, ARCH_TYPE_AVR32_UCR2, |
| + "__AVR32_UC3A0512__"}, |
| + {"uc3a0512es", PART_TYPE_AVR32_UC3A0512ES, ARCH_TYPE_AVR32_UCR1, |
| + "__AVR32_UC3A0512ES__"}, |
| + {"uc3a1128", PART_TYPE_AVR32_UC3A1128, ARCH_TYPE_AVR32_UCR2, |
| + "__AVR32_UC3A1128__"}, |
| + {"uc3a1256", PART_TYPE_AVR32_UC3A1256, ARCH_TYPE_AVR32_UCR2, |
| + "__AVR32_UC3A1256__"}, |
| + {"uc3a1512", PART_TYPE_AVR32_UC3A1512, ARCH_TYPE_AVR32_UCR2, |
| + "__AVR32_UC3A1512__"}, |
| + {"uc3a1512es", PART_TYPE_AVR32_UC3A1512ES, ARCH_TYPE_AVR32_UCR1, |
| + "__AVR32_UC3A1512ES__"}, |
| + {"uc3a3revd", PART_TYPE_AVR32_UC3A3REVD, ARCH_TYPE_AVR32_UCR2NOMUL, |
| + "__AVR32_UC3A3256S__"}, |
| + {"uc3a364", PART_TYPE_AVR32_UC3A364, ARCH_TYPE_AVR32_UCR2, |
| + "__AVR32_UC3A364__"}, |
| + {"uc3a364s", PART_TYPE_AVR32_UC3A364S, ARCH_TYPE_AVR32_UCR2, |
| + "__AVR32_UC3A364S__"}, |
| + {"uc3a3128", PART_TYPE_AVR32_UC3A3128, ARCH_TYPE_AVR32_UCR2, |
| + "__AVR32_UC3A3128__"}, |
| + {"uc3a3128s", PART_TYPE_AVR32_UC3A3128S, ARCH_TYPE_AVR32_UCR2, |
| + "__AVR32_UC3A3128S__"}, |
| + {"uc3a3256", PART_TYPE_AVR32_UC3A3256, ARCH_TYPE_AVR32_UCR2, |
| + "__AVR32_UC3A3256__"}, |
| + {"uc3a3256s", PART_TYPE_AVR32_UC3A3256S, ARCH_TYPE_AVR32_UCR2, |
| + "__AVR32_UC3A3256S__"}, |
| + {"uc3b064", PART_TYPE_AVR32_UC3B064, ARCH_TYPE_AVR32_UCR1, |
| + "__AVR32_UC3B064__"}, |
| + {"uc3b0128", PART_TYPE_AVR32_UC3B0128, ARCH_TYPE_AVR32_UCR1, |
| + "__AVR32_UC3B0128__"}, |
| + {"uc3b0256", PART_TYPE_AVR32_UC3B0256, ARCH_TYPE_AVR32_UCR1, |
| + "__AVR32_UC3B0256__"}, |
| + {"uc3b0256es", PART_TYPE_AVR32_UC3B0256ES, ARCH_TYPE_AVR32_UCR1, |
| + "__AVR32_UC3B0256ES__"}, |
| + {"uc3b164", PART_TYPE_AVR32_UC3B164, ARCH_TYPE_AVR32_UCR1, |
| + "__AVR32_UC3B164__"}, |
| + {"uc3b1128", PART_TYPE_AVR32_UC3B1128, ARCH_TYPE_AVR32_UCR1, |
| + "__AVR32_UC3B1128__"}, |
| + {"uc3b1256", PART_TYPE_AVR32_UC3B1256, ARCH_TYPE_AVR32_UCR1, |
| + "__AVR32_UC3B1256__"}, |
| + {"uc3b1256es", PART_TYPE_AVR32_UC3B1256ES, ARCH_TYPE_AVR32_UCR1, |
| + "__AVR32_UC3B1256ES__"}, |
| + {NULL, 0, 0, NULL} |
| +}; |
| + |
| +/* List of all known AVR32 architectures */ |
| +static const struct arch_type_s avr32_arch_types[] = { |
| + /* name, architecture type, microarchitecture type, feature flags, macro */ |
| + {"ap", ARCH_TYPE_AVR32_AP, UARCH_TYPE_AVR32B, |
| + (FLAG_AVR32_HAS_DSP |
| + | FLAG_AVR32_HAS_SIMD |
| + | FLAG_AVR32_HAS_UNALIGNED_WORD |
| + | FLAG_AVR32_HAS_BRANCH_PRED | FLAG_AVR32_HAS_RETURN_STACK |
| + | FLAG_AVR32_HAS_CACHES), |
| + "__AVR32_AP__"}, |
| + {"ucr1", ARCH_TYPE_AVR32_UCR1, UARCH_TYPE_AVR32A, |
| + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW), |
| + "__AVR32_UC__=1"}, |
| + {"ucr2", ARCH_TYPE_AVR32_UCR2, UARCH_TYPE_AVR32A, |
| + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW |
| + | FLAG_AVR32_HAS_V2_INSNS), |
| + "__AVR32_UC__=2"}, |
| + {"ucr2nomul", ARCH_TYPE_AVR32_UCR2NOMUL, UARCH_TYPE_AVR32A, |
| + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW |
| + | FLAG_AVR32_HAS_V2_INSNS | FLAG_AVR32_HAS_NO_MUL_INSNS), |
| + "__AVR32_UC__=3"}, |
| + {NULL, 0, 0, 0, NULL} |
| +}; |
| + |
| +/* Default arch name */ |
| +const char *avr32_arch_name = "none"; |
| +const char *avr32_part_name = "none"; |
| + |
| +const struct part_type_s *avr32_part; |
| +const struct arch_type_s *avr32_arch; |
| + |
| + |
| +/* Set default target_flags. */ |
| +#undef TARGET_DEFAULT_TARGET_FLAGS |
| +#define TARGET_DEFAULT_TARGET_FLAGS \ |
| + (MASK_HAS_ASM_ADDR_PSEUDOS | MASK_MD_REORG_OPTIMIZATION | MASK_COND_EXEC_BEFORE_RELOAD) |
| + |
| +void |
| +avr32_optimization_options (int level, int size) |
| +{ |
| + if (AVR32_ALWAYS_PIC) |
| + flag_pic = 1; |
| + |
| + /* Enable section anchors if optimization is enabled. */ |
| + if (level > 0 || size) |
| + flag_section_anchors = 1; |
| +} |
| + |
| +/* Override command line options */ |
| +void |
| +avr32_override_options (void) |
| +{ |
| + const struct part_type_s *part; |
| + const struct arch_type_s *arch; |
| + |
| + /* Add backward compatibility. */ |
| + if (strcmp ("uc", avr32_arch_name) == 0) |
| + { |
| + fprintf (stderr, "Warning: Deprecated arch `%s' specified. " |
| + "Please use '-march=ucr1' instead. " |
| + "Converting to arch 'ucr1'\n", |
| + avr32_arch_name); |
| + avr32_arch_name="ucr1"; |
| + } |
| + |
| + /* Check if arch type is set. */ |
| + for (arch = avr32_arch_types; arch->name; arch++) |
| + { |
| + if (strcmp (arch->name, avr32_arch_name) == 0) |
| + break; |
| + } |
| + avr32_arch = arch; |
| + |
| + if (!arch->name && strcmp("none", avr32_arch_name) != 0) |
| + { |
| + fprintf (stderr, "Unknown arch `%s' specified\n" |
| + "Known arch names:\n" |
| + "\tuc (deprecated)\n", |
| + avr32_arch_name); |
| + for (arch = avr32_arch_types; arch->name; arch++) |
| + fprintf (stderr, "\t%s\n", arch->name); |
| + avr32_arch = &avr32_arch_types[ARCH_TYPE_AVR32_AP]; |
| + } |
| + |
| + /* Check if part type is set. */ |
| + for (part = avr32_part_types; part->name; part++) |
| + if (strcmp (part->name, avr32_part_name) == 0) |
| + break; |
| + |
| + avr32_part = part; |
| + if (!part->name) |
| + { |
| + fprintf (stderr, "Unknown part `%s' specified\nKnown part names:\n", |
| + avr32_part_name); |
| + for (part = avr32_part_types; part->name; part++) |
| + { |
| + if (strcmp("none", part->name) != 0) |
| + fprintf (stderr, "\t%s\n", part->name); |
| + } |
| + /* Set default to NONE. */ |
| + avr32_part = &avr32_part_types[PART_TYPE_AVR32_NONE]; |
| + } |
| + |
| + /* NB! option -march= overrides option -mpart |
| + * if both are used at the same time */ |
| + if (!arch->name) |
| + avr32_arch = &avr32_arch_types[avr32_part->arch_type]; |
| + |
| + /* If optimization level is two or greater, then align start of loops to a |
| + word boundary since this will allow folding the first insn of the loop. |
| + Do this only for targets supporting branch prediction. */ |
| + if (optimize >= 2 && TARGET_BRANCH_PRED) |
| + align_loops = 2; |
| + |
| + |
| + /* Enable fast-float library if unsafe math optimizations |
| + are used. */ |
| + if (flag_unsafe_math_optimizations) |
| + target_flags |= MASK_FAST_FLOAT; |
| + |
| + /* Check if we should set avr32_imm_in_const_pool |
| + based on if caches are present or not. */ |
| + if ( avr32_imm_in_const_pool == -1 ) |
| + { |
| + if ( TARGET_CACHES ) |
| + avr32_imm_in_const_pool = 1; |
| + else |
| + avr32_imm_in_const_pool = 0; |
| + } |
| + |
| + if (TARGET_NO_PIC) |
| + flag_pic = 0; |
| + |
| + avr32_add_gc_roots (); |
| +} |
| + |
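| +/* Example (illustrative): building with just "-mpart=uc3b0256" matches the |
| + uc3b0256 entry in avr32_part_types above and, since no -march= is given, |
| + the architecture is taken from the part, i.e. ucr1. If both -mpart and |
| + -march are given, -march wins, as noted above. */ |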
| + |
| +/* |
| +If defined, a function that outputs the assembler code for entry to a |
| +function. The prologue is responsible for setting up the stack frame, |
| +initializing the frame pointer register, saving registers that must be |
| +saved, and allocating size additional bytes of storage for the |
| +local variables. size is an integer. file is a stdio |
| +stream to which the assembler code should be output. |
| + |
| +The label for the beginning of the function need not be output by this |
| +macro. That has already been done when the macro is run. |
| + |
| +To determine which registers to save, the macro can refer to the array |
| +regs_ever_live: element r is nonzero if hard register |
| +r is used anywhere within the function. This implies the function |
| +prologue should save register r, provided it is not one of the |
| +call-used registers. (TARGET_ASM_FUNCTION_EPILOGUE must likewise use |
| +regs_ever_live.) |
| + |
| +On machines that have ``register windows'', the function entry code does |
| +not save on the stack the registers that are in the windows, even if |
| +they are supposed to be preserved by function calls; instead it takes |
| +appropriate steps to ``push'' the register stack, if any non-call-used |
| +registers are used in the function. |
| + |
| +On machines where functions may or may not have frame-pointers, the |
| +function entry code must vary accordingly; it must set up the frame |
| +pointer if one is wanted, and not otherwise. To determine whether a |
| +frame pointer is wanted, the macro can refer to the variable |
| +frame_pointer_needed. The variable's value will be 1 at run |
| +time in a function that needs a frame pointer. (see Elimination). |
| + |
| +The function entry code is responsible for allocating any stack space |
| +required for the function. This stack space consists of the regions |
| +listed below. In most cases, these regions are allocated in the |
| +order listed, with the last listed region closest to the top of the |
| +stack (the lowest address if STACK_GROWS_DOWNWARD is defined, and |
| +the highest address if it is not defined). You can use a different order |
| +for a machine if doing so is more convenient or required for |
| +compatibility reasons. Except in cases where required by standard |
| +or by a debugger, there is no reason why the stack layout used by GCC |
| +need agree with that used by other compilers for a machine. |
| +*/ |
| + |
| +#undef TARGET_ASM_FUNCTION_PROLOGUE |
| +#define TARGET_ASM_FUNCTION_PROLOGUE avr32_target_asm_function_prologue |
| + |
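| +/* The hook installed above is called by GCC as |
| + void avr32_target_asm_function_prologue (FILE *file, HOST_WIDE_INT size); |
| + after the function label has already been output (see the notes above). */ |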
| + |
| +#undef TARGET_DEFAULT_SHORT_ENUMS |
| +#define TARGET_DEFAULT_SHORT_ENUMS hook_bool_void_false |
| + |
| +#undef TARGET_PROMOTE_FUNCTION_ARGS |
| +#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true |
| + |
| +#undef TARGET_PROMOTE_FUNCTION_RETURN |
| +#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true |
| + |
| +#undef TARGET_PROMOTE_PROTOTYPES |
| +#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true |
| + |
| +#undef TARGET_MUST_PASS_IN_STACK |
| +#define TARGET_MUST_PASS_IN_STACK avr32_must_pass_in_stack |
| + |
| +#undef TARGET_PASS_BY_REFERENCE |
| +#define TARGET_PASS_BY_REFERENCE avr32_pass_by_reference |
| + |
| +#undef TARGET_STRICT_ARGUMENT_NAMING |
| +#define TARGET_STRICT_ARGUMENT_NAMING avr32_strict_argument_naming |
| + |
| +#undef TARGET_VECTOR_MODE_SUPPORTED_P |
| +#define TARGET_VECTOR_MODE_SUPPORTED_P avr32_vector_mode_supported |
| + |
| +#undef TARGET_RETURN_IN_MEMORY |
| +#define TARGET_RETURN_IN_MEMORY avr32_return_in_memory |
| + |
| +#undef TARGET_RETURN_IN_MSB |
| +#define TARGET_RETURN_IN_MSB avr32_return_in_msb |
| + |
| +#undef TARGET_ENCODE_SECTION_INFO |
| +#define TARGET_ENCODE_SECTION_INFO avr32_encode_section_info |
| + |
| +#undef TARGET_ARG_PARTIAL_BYTES |
| +#define TARGET_ARG_PARTIAL_BYTES avr32_arg_partial_bytes |
| + |
| +#undef TARGET_STRIP_NAME_ENCODING |
| +#define TARGET_STRIP_NAME_ENCODING avr32_strip_name_encoding |
| + |
| +#define streq(string1, string2) (strcmp (string1, string2) == 0) |
| + |
| +#undef TARGET_NARROW_VOLATILE_BITFIELD |
| +#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false |
| + |
| +#undef TARGET_ATTRIBUTE_TABLE |
| +#define TARGET_ATTRIBUTE_TABLE avr32_attribute_table |
| + |
| +#undef TARGET_COMP_TYPE_ATTRIBUTES |
| +#define TARGET_COMP_TYPE_ATTRIBUTES avr32_comp_type_attributes |
| + |
| + |
| +#undef TARGET_RTX_COSTS |
| +#define TARGET_RTX_COSTS avr32_rtx_costs |
| + |
| +#undef TARGET_CANNOT_FORCE_CONST_MEM |
| +#define TARGET_CANNOT_FORCE_CONST_MEM avr32_cannot_force_const_mem |
| + |
| +#undef TARGET_ASM_INTEGER |
| +#define TARGET_ASM_INTEGER avr32_assemble_integer |
| + |
| +#undef TARGET_FUNCTION_VALUE |
| +#define TARGET_FUNCTION_VALUE avr32_function_value |
| + |
| +#undef TARGET_MIN_ANCHOR_OFFSET |
| +#define TARGET_MIN_ANCHOR_OFFSET (0) |
| + |
| +#undef TARGET_MAX_ANCHOR_OFFSET |
| +#define TARGET_MAX_ANCHOR_OFFSET ((1 << 15) - 1) |
| + |
| +#undef TARGET_SECONDARY_RELOAD |
| +#define TARGET_SECONDARY_RELOAD avr32_secondary_reload |
| + |
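| +/* Implements TARGET_SECONDARY_RELOAD. RMW memory operands cannot be |
| + reloaded directly, so point SRI at the special reload patterns for |
| + them; no extra register class is needed, hence NO_REGS. */ |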
| +enum reg_class |
| +avr32_secondary_reload (bool in_p, rtx x, enum reg_class class ATTRIBUTE_UNUSED, |
| + enum machine_mode mode, secondary_reload_info *sri) |
| +{ |
| + |
| + if ( avr32_rmw_memory_operand (x, mode) ) |
| + { |
| + if (!in_p) |
| + sri->icode = CODE_FOR_reload_out_rmw_memory_operand; |
| + else |
| + sri->icode = CODE_FOR_reload_in_rmw_memory_operand; |
| + } |
| + return NO_REGS; |
| + |
| +} |
| + |
| +/* |
| + * Switches to the appropriate section for output of constant pool |
| + * entry x in mode. You can assume that x is some kind of constant in |
| + * RTL. The argument mode is redundant except in the case of a |
| + * const_int rtx. Select the section by calling readonly_data_section |
| + * or one of the alternatives for other sections. align is the |
| + * constant alignment in bits. |
| + * |
| + * The default version of this function takes care of putting symbolic |
| + * constants in flag_pic mode in data_section and everything else in |
| + * readonly_data_section. |
| + */ |
| +//#undef TARGET_ASM_SELECT_RTX_SECTION |
| +//#define TARGET_ASM_SELECT_RTX_SECTION avr32_select_rtx_section |
| + |
| + |
| +/* |
| + * If non-null, this hook performs a target-specific pass over the |
| + * instruction stream. The compiler will run it at all optimization |
| + * levels, just before the point at which it normally does |
| + * delayed-branch scheduling. |
| + * |
| + * The exact purpose of the hook varies from target to target. Some |
| + * use it to do transformations that are necessary for correctness, |
| + * such as laying out in-function constant pools or avoiding hardware |
| + * hazards. Others use it as an opportunity to do some |
| + * machine-dependent optimizations. |
| + * |
| + * You need not implement the hook if it has nothing to do. The |
| + * default definition is null. |
| + */ |
| +#undef TARGET_MACHINE_DEPENDENT_REORG |
| +#define TARGET_MACHINE_DEPENDENT_REORG avr32_reorg |
| + |
| +/* Target hook for assembling integer objects. |
| + Need to handle integer vectors */ |
| +static bool |
| +avr32_assemble_integer (rtx x, unsigned int size, int aligned_p) |
| +{ |
| + if (avr32_vector_mode_supported (GET_MODE (x))) |
| + { |
| + int i, units; |
| + |
| + if (GET_CODE (x) != CONST_VECTOR) |
| + abort (); |
| + |
| + units = CONST_VECTOR_NUNITS (x); |
| + |
| + switch (GET_MODE (x)) |
| + { |
| + case V2HImode: |
| + size = 2; |
| + break; |
| + case V4QImode: |
| + size = 1; |
| + break; |
| + default: |
| + abort (); |
| + } |
| + |
| + for (i = 0; i < units; i++) |
| + { |
| + rtx elt; |
| + |
| + elt = CONST_VECTOR_ELT (x, i); |
| + assemble_integer (elt, size, i == 0 ? 32 : size * BITS_PER_UNIT, 1); |
| + } |
| + |
| + return true; |
| + } |
| + |
| + return default_assemble_integer (x, size, aligned_p); |
| +} |
| + |
| +/* |
| + * This target hook describes the relative costs of RTL expressions. |
| + * |
| + * The cost may depend on the precise form of the expression, which is |
| + * available for examination in x, and the rtx code of the expression |
| + * in which it is contained, found in outer_code. code is the |
| + * expression code--redundant, since it can be obtained with GET_CODE |
| + * (x). |
| + * |
| + * In implementing this hook, you can use the construct COSTS_N_INSNS |
| + * (n) to specify a cost equal to n fast instructions. |
| + * |
| + * On entry to the hook, *total contains a default estimate for the |
| + * cost of the expression. The hook should modify this value as |
| + * necessary. Traditionally, the default costs are COSTS_N_INSNS (5) |
| + * for multiplications, COSTS_N_INSNS (7) for division and modulus |
| + * operations, and COSTS_N_INSNS (1) for all other operations. |
| + * |
| + * When optimizing for code size, i.e. when optimize_size is non-zero, |
| + * this target hook should be used to estimate the relative size cost |
| + * of an expression, again relative to COSTS_N_INSNS. |
| + * |
| + * The hook returns true when all subexpressions of x have been |
| + * processed, and false when rtx_cost should recurse. |
| + */ |
| + |
| +/* Worker routine for avr32_rtx_costs. */ |
| +static inline int |
| +avr32_rtx_costs_1 (rtx x, enum rtx_code code ATTRIBUTE_UNUSED, |
| + enum rtx_code outer ATTRIBUTE_UNUSED) |
| +{ |
| + enum machine_mode mode = GET_MODE (x); |
| + |
| + switch (GET_CODE (x)) |
| + { |
| + case MEM: |
| + /* Using pre-decrement / post-increment memory operations on the |
| + avr32_uc architecture means that two writebacks must be performed |
| + and hence two cycles are needed. */ |
| + if (!optimize_size |
| + && GET_MODE_SIZE (mode) <= 2 * UNITS_PER_WORD |
| + && TARGET_ARCH_UC |
| + && (GET_CODE (XEXP (x, 0)) == PRE_DEC |
| + || GET_CODE (XEXP (x, 0)) == POST_INC)) |
| + return COSTS_N_INSNS (5); |
| + |
| + /* Memory costs quite a lot for the first word, but subsequent words |
| + load at the equivalent of a single insn each. */ |
| + if (GET_MODE_SIZE (mode) > UNITS_PER_WORD) |
| + return COSTS_N_INSNS (3 + (GET_MODE_SIZE (mode) / UNITS_PER_WORD)); |
| + |
| + return COSTS_N_INSNS (4); |
| + case SYMBOL_REF: |
| + case CONST: |
| + /* These are valid for the pseudo insns lda.w and call, which operate |
| + on direct addresses. We assume that the cost of an lda.w is the same |
| + as the cost of a ld.w insn. */ |
| + return (outer == SET) ? COSTS_N_INSNS (4) : COSTS_N_INSNS (1); |
| + case DIV: |
| + case MOD: |
| + case UDIV: |
| + case UMOD: |
| + return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16); |
| + |
| + case ROTATE: |
| + case ROTATERT: |
| + if (mode == TImode) |
| + return COSTS_N_INSNS (100); |
| + |
| + if (mode == DImode) |
| + return COSTS_N_INSNS (10); |
| + return COSTS_N_INSNS (4); |
| + case ASHIFT: |
| + case LSHIFTRT: |
| + case ASHIFTRT: |
| + case NOT: |
| + if (mode == TImode) |
| + return COSTS_N_INSNS (10); |
| + |
| + if (mode == DImode) |
| + return COSTS_N_INSNS (4); |
| + return COSTS_N_INSNS (1); |
| + case PLUS: |
| + case MINUS: |
| + case NEG: |
| + case COMPARE: |
| + case ABS: |
| + if (GET_MODE_CLASS (mode) == MODE_FLOAT) |
| + return COSTS_N_INSNS (100); |
| + |
| + if (mode == TImode) |
| + return COSTS_N_INSNS (50); |
| + |
| + if (mode == DImode) |
| + return COSTS_N_INSNS (2); |
| + return COSTS_N_INSNS (1); |
| + |
| + case MULT: |
| + { |
| + if (GET_MODE_CLASS (mode) == MODE_FLOAT) |
| + return COSTS_N_INSNS (300); |
| + |
| + if (mode == TImode) |
| + return COSTS_N_INSNS (16); |
| + |
| + if (mode == DImode) |
| + return COSTS_N_INSNS (4); |
| + |
| + if (mode == HImode) |
| + return COSTS_N_INSNS (2); |
| + |
| + return COSTS_N_INSNS (3); |
| + } |
| + case IF_THEN_ELSE: |
| + if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC) |
| + return COSTS_N_INSNS (4); |
| + return COSTS_N_INSNS (1); |
| + case SIGN_EXTEND: |
| + case ZERO_EXTEND: |
| + /* Sign/zero extensions of registers cost quite a lot since these |
| + instructions only take one register operand, which means that gcc |
| + often must insert some move instructions. */ |
| + if (mode == QImode || mode == HImode) |
| + return (COSTS_N_INSNS (GET_CODE (XEXP (x, 0)) == MEM ? 0 : 1)); |
| + return COSTS_N_INSNS (4); |
| + case UNSPEC: |
| + /* divmod operations */ |
| + if (XINT (x, 1) == UNSPEC_UDIVMODSI4_INTERNAL |
| + || XINT (x, 1) == UNSPEC_DIVMODSI4_INTERNAL) |
| + { |
| + return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16); |
| + } |
| + /* Fallthrough */ |
| + default: |
| + return COSTS_N_INSNS (1); |
| + } |
| +} |
| + |
| +static bool |
| +avr32_rtx_costs (rtx x, int code, int outer_code, int *total) |
| +{ |
| + *total = avr32_rtx_costs_1 (x, code, outer_code); |
| + return true; |
| +} |
| + |
| + |
| +bool |
| +avr32_cannot_force_const_mem (rtx x ATTRIBUTE_UNUSED) |
| +{ |
| + /* Do not want symbols in the constant pool when compiling pic or if using |
| + address pseudo instructions. */ |
| + return ((flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS) |
| + && avr32_find_symbol (x) != NULL_RTX); |
| +} |
| + |
| + |
| +/* Table of machine attributes. */ |
| +const struct attribute_spec avr32_attribute_table[] = { |
| + /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */ |
| + /* Interrupt Service Routines have special prologue and epilogue |
| + requirements. */ |
| + {"isr", 0, 1, false, false, false, avr32_handle_isr_attribute}, |
| + {"interrupt", 0, 1, false, false, false, avr32_handle_isr_attribute}, |
| + {"acall", 0, 1, false, true, true, avr32_handle_acall_attribute}, |
| + {"naked", 0, 0, true, false, false, avr32_handle_fndecl_attribute}, |
| + {"rmw_addressable", 0, 0, true, false, false, NULL}, |
| + {NULL, 0, 0, false, false, false, NULL} |
| +}; |
| + |
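| +/* Example use of the attributes above (illustrative; the function names are |
| + arbitrary): |
| + void __attribute__ ((interrupt ("full"))) timer_irq_handler (void); |
| + void __attribute__ ((naked)) start (void); |
| + The string argument is matched against isr_attribute_args below. */ |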
| + |
| +typedef struct |
| +{ |
| + const char *const arg; |
| + const unsigned long return_value; |
| +} |
| +isr_attribute_arg; |
| + |
| +static const isr_attribute_arg isr_attribute_args[] = { |
| + {"FULL", AVR32_FT_ISR_FULL}, |
| + {"full", AVR32_FT_ISR_FULL}, |
| + {"HALF", AVR32_FT_ISR_HALF}, |
| + {"half", AVR32_FT_ISR_HALF}, |
| + {"NONE", AVR32_FT_ISR_NONE}, |
| + {"none", AVR32_FT_ISR_NONE}, |
| + {"UNDEF", AVR32_FT_ISR_NONE}, |
| + {"undef", AVR32_FT_ISR_NONE}, |
| + {"SWI", AVR32_FT_ISR_NONE}, |
| + {"swi", AVR32_FT_ISR_NONE}, |
| + {NULL, AVR32_FT_ISR_NONE} |
| +}; |
| + |
| +/* Returns the (interrupt) function type of the current |
| + function, or AVR32_FT_UNKNOWN if the type cannot be determined. */ |
| + |
| +static unsigned long |
| +avr32_isr_value (tree argument) |
| +{ |
| + const isr_attribute_arg *ptr; |
| + const char *arg; |
| + |
| + /* No argument - default to ISR_NONE. */ |
| + if (argument == NULL_TREE) |
| + return AVR32_FT_ISR_NONE; |
| + |
| + /* Get the value of the argument. */ |
| + if (TREE_VALUE (argument) == NULL_TREE |
| + || TREE_CODE (TREE_VALUE (argument)) != STRING_CST) |
| + return AVR32_FT_UNKNOWN; |
| + |
| + arg = TREE_STRING_POINTER (TREE_VALUE (argument)); |
| + |
| + /* Check it against the list of known arguments. */ |
| + for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++) |
| + if (streq (arg, ptr->arg)) |
| + return ptr->return_value; |
| + |
| + /* An unrecognized interrupt type. */ |
| + return AVR32_FT_UNKNOWN; |
| +} |
| + |
| + |
| + |
| +/* |
| +These hooks specify assembly directives for creating certain kinds |
| +of integer object. The TARGET_ASM_BYTE_OP directive creates a |
| +byte-sized object, the TARGET_ASM_ALIGNED_HI_OP one creates an |
| +aligned two-byte object, and so on. Any of the hooks may be |
| +NULL, indicating that no suitable directive is available. |
| + |
| +The compiler will print these strings at the start of a new line, |
| +followed immediately by the object's initial value. In most cases, |
| +the string should contain a tab, a pseudo-op, and then another tab. |
| +*/ |
| +#undef TARGET_ASM_BYTE_OP |
| +#define TARGET_ASM_BYTE_OP "\t.byte\t" |
| +#undef TARGET_ASM_ALIGNED_HI_OP |
| +#define TARGET_ASM_ALIGNED_HI_OP "\t.align 1\n\t.short\t" |
| +#undef TARGET_ASM_ALIGNED_SI_OP |
| +#define TARGET_ASM_ALIGNED_SI_OP "\t.align 2\n\t.int\t" |
| +#undef TARGET_ASM_ALIGNED_DI_OP |
| +#define TARGET_ASM_ALIGNED_DI_OP NULL |
| +#undef TARGET_ASM_ALIGNED_TI_OP |
| +#define TARGET_ASM_ALIGNED_TI_OP NULL |
| +#undef TARGET_ASM_UNALIGNED_HI_OP |
| +#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t" |
| +#undef TARGET_ASM_UNALIGNED_SI_OP |
| +#define TARGET_ASM_UNALIGNED_SI_OP "\t.int\t" |
| +#undef TARGET_ASM_UNALIGNED_DI_OP |
| +#define TARGET_ASM_UNALIGNED_DI_OP NULL |
| +#undef TARGET_ASM_UNALIGNED_TI_OP |
| +#define TARGET_ASM_UNALIGNED_TI_OP NULL |
| + |
| +#undef TARGET_ASM_OUTPUT_MI_THUNK |
| +#define TARGET_ASM_OUTPUT_MI_THUNK avr32_output_mi_thunk |
| + |
| +#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK |
| +#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true |
| + |
| +static void |
| +avr32_output_mi_thunk (FILE * file, |
| + tree thunk ATTRIBUTE_UNUSED, |
| + HOST_WIDE_INT delta, |
| + HOST_WIDE_INT vcall_offset, tree function) |
| + { |
| + int mi_delta = delta; |
| + int this_regno = |
| + (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function) ? |
| + INTERNAL_REGNUM (11) : INTERNAL_REGNUM (12)); |
| + |
| + |
| + if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21") |
| + || vcall_offset) |
| + { |
| + fputs ("\tpushm\tlr\n", file); |
| + } |
| + |
| + |
| + if (mi_delta != 0) |
| + { |
| + if (avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")) |
| + { |
| + fprintf (file, "\tsub\t%s, %d\n", reg_names[this_regno], -mi_delta); |
| + } |
| + else |
| + { |
| + /* The immediate is larger than k21, so we need a temporary register; |
| + use lr, which was pushed to the stack above. */ |
| + fprintf (file, "\tmov\tlr, lo(%d)\n", mi_delta); |
| + fprintf (file, "\torh\tlr, hi(%d)\n", mi_delta); |
| + fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]); |
| + } |
| + } |
| + |
| + |
| + if (vcall_offset != 0) |
| + { |
| + fprintf (file, "\tld.w\tlr, %s[0]\n", reg_names[this_regno]); |
| + fprintf (file, "\tld.w\tlr, lr[%i]\n", (int) vcall_offset); |
| + fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]); |
| + } |
| + |
| + |
| + if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21") |
| + || vcall_offset) |
| + { |
| + fputs ("\tpopm\tlr\n", file); |
| + } |
| + |
| + /* Jump to the function. We assume that we can use an rjmp since the |
| + function to jump to is local and probably not too far away from |
| + the thunk. If this assumption proves to be wrong we could implement |
| + this jump by calculating the offset between the jump source and destination, |
| + putting it in the constant pool and then performing an add to pc. |
| + This would also be legitimate PIC code. But for now we hope that an rjmp |
| + will be sufficient... |
| + */ |
| + fputs ("\trjmp\t", file); |
| + assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0)); |
| + fputc ('\n', file); |
| + } |
| + |
| + |
| +/* Implements target hook vector_mode_supported. */ |
| +bool |
| +avr32_vector_mode_supported (enum machine_mode mode) |
| +{ |
| + if ((mode == V2HImode) || (mode == V4QImode)) |
| + return true; |
| + |
| + return false; |
| +} |
| + |
| + |
| +#undef TARGET_INIT_LIBFUNCS |
| +#define TARGET_INIT_LIBFUNCS avr32_init_libfuncs |
| + |
| +#undef TARGET_INIT_BUILTINS |
| +#define TARGET_INIT_BUILTINS avr32_init_builtins |
| + |
| +#undef TARGET_EXPAND_BUILTIN |
| +#define TARGET_EXPAND_BUILTIN avr32_expand_builtin |
| + |
| +tree int_ftype_int, int_ftype_void, short_ftype_short, void_ftype_int_int, |
| + void_ftype_ptr_int; |
| +tree void_ftype_int, void_ftype_void, int_ftype_ptr_int; |
| +tree short_ftype_short, int_ftype_int_short, int_ftype_short_short, |
| + short_ftype_short_short; |
| +tree int_ftype_int_int, longlong_ftype_int_short, longlong_ftype_short_short; |
| +tree void_ftype_int_int_int_int_int, void_ftype_int_int_int; |
| +tree longlong_ftype_int_int, void_ftype_int_int_longlong; |
| +tree int_ftype_int_int_int, longlong_ftype_longlong_int_short; |
| +tree longlong_ftype_longlong_short_short, int_ftype_int_short_short; |
| + |
| +#define def_builtin(NAME, TYPE, CODE) \ |
| + lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \ |
| + BUILT_IN_MD, NULL, NULL_TREE) |
| + |
| +#define def_mbuiltin(MASK, NAME, TYPE, CODE) \ |
| + do \ |
| + { \ |
| + if ((MASK)) \ |
| + lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \ |
| + BUILT_IN_MD, NULL, NULL_TREE); \ |
| + } \ |
| + while (0) |
| + |
| +struct builtin_description |
| +{ |
| + const unsigned int mask; |
| + const enum insn_code icode; |
| + const char *const name; |
| + const int code; |
| + const enum rtx_code comparison; |
| + const unsigned int flag; |
| + const tree *ftype; |
| +}; |
| + |
| +static const struct builtin_description bdesc_2arg[] = { |
| +#define DSP_BUILTIN(code, builtin, ftype) \ |
| + { 1, CODE_FOR_##code, "__builtin_" #code , \ |
| + AVR32_BUILTIN_##builtin, 0, 0, ftype } |
| + |
| + DSP_BUILTIN (mulsathh_h, MULSATHH_H, &short_ftype_short_short), |
| + DSP_BUILTIN (mulsathh_w, MULSATHH_W, &int_ftype_short_short), |
| + DSP_BUILTIN (mulsatrndhh_h, MULSATRNDHH_H, &short_ftype_short_short), |
| + DSP_BUILTIN (mulsatrndwh_w, MULSATRNDWH_W, &int_ftype_int_short), |
| + DSP_BUILTIN (mulsatwh_w, MULSATWH_W, &int_ftype_int_short), |
| + DSP_BUILTIN (satadd_h, SATADD_H, &short_ftype_short_short), |
| + DSP_BUILTIN (satsub_h, SATSUB_H, &short_ftype_short_short), |
| + DSP_BUILTIN (satadd_w, SATADD_W, &int_ftype_int_int), |
| + DSP_BUILTIN (satsub_w, SATSUB_W, &int_ftype_int_int), |
| + DSP_BUILTIN (mulwh_d, MULWH_D, &longlong_ftype_int_short), |
| + DSP_BUILTIN (mulnwh_d, MULNWH_D, &longlong_ftype_int_short) |
| +}; |
| + |
| + |
| +void |
| +avr32_init_builtins (void) |
| +{ |
| + unsigned int i; |
| + const struct builtin_description *d; |
| + tree endlink = void_list_node; |
| + tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink); |
| + tree longlong_endlink = |
| + tree_cons (NULL_TREE, long_long_integer_type_node, endlink); |
| + tree short_endlink = |
| + tree_cons (NULL_TREE, short_integer_type_node, endlink); |
| + tree void_endlink = tree_cons (NULL_TREE, void_type_node, endlink); |
| + |
| + /* int func (int) */ |
| + int_ftype_int = build_function_type (integer_type_node, int_endlink); |
| + |
| + /* short func (short) */ |
| + short_ftype_short |
| + = build_function_type (short_integer_type_node, short_endlink); |
| + |
| + /* short func (short, short) */ |
| + short_ftype_short_short |
| + = build_function_type (short_integer_type_node, |
| + tree_cons (NULL_TREE, short_integer_type_node, |
| + short_endlink)); |
| + |
| + /* long long func (long long, short, short) */ |
| + longlong_ftype_longlong_short_short |
| + = build_function_type (long_long_integer_type_node, |
| + tree_cons (NULL_TREE, long_long_integer_type_node, |
| + tree_cons (NULL_TREE, |
| + short_integer_type_node, |
| + short_endlink))); |
| + |
| + /* long long func (short, short) */ |
| + longlong_ftype_short_short |
| + = build_function_type (long_long_integer_type_node, |
| + tree_cons (NULL_TREE, short_integer_type_node, |
| + short_endlink)); |
| + |
| + /* int func (int, int) */ |
| + int_ftype_int_int |
| + = build_function_type (integer_type_node, |
| + tree_cons (NULL_TREE, integer_type_node, |
| + int_endlink)); |
| + |
| + /* long long func (int, int) */ |
| + longlong_ftype_int_int |
| + = build_function_type (long_long_integer_type_node, |
| + tree_cons (NULL_TREE, integer_type_node, |
| + int_endlink)); |
| + |
| + /* long long int func (long long, int, short) */ |
| + longlong_ftype_longlong_int_short |
| + = build_function_type (long_long_integer_type_node, |
| + tree_cons (NULL_TREE, long_long_integer_type_node, |
| + tree_cons (NULL_TREE, integer_type_node, |
| + short_endlink))); |
| + |
| + /* long long int func (int, short) */ |
| + longlong_ftype_int_short |
| + = build_function_type (long_long_integer_type_node, |
| + tree_cons (NULL_TREE, integer_type_node, |
| + short_endlink)); |
| + |
| + /* int func (int, short, short) */ |
| + int_ftype_int_short_short |
| + = build_function_type (integer_type_node, |
| + tree_cons (NULL_TREE, integer_type_node, |
| + tree_cons (NULL_TREE, |
| + short_integer_type_node, |
| + short_endlink))); |
| + |
| + /* int func (short, short) */ |
| + int_ftype_short_short |
| + = build_function_type (integer_type_node, |
| + tree_cons (NULL_TREE, short_integer_type_node, |
| + short_endlink)); |
| + |
| + /* int func (int, short) */ |
| + int_ftype_int_short |
| + = build_function_type (integer_type_node, |
| + tree_cons (NULL_TREE, integer_type_node, |
| + short_endlink)); |
| + |
| + /* void func (int, int) */ |
| + void_ftype_int_int |
| + = build_function_type (void_type_node, |
| + tree_cons (NULL_TREE, integer_type_node, |
| + int_endlink)); |
| + |
| + /* void func (int, int, int) */ |
| + void_ftype_int_int_int |
| + = build_function_type (void_type_node, |
| + tree_cons (NULL_TREE, integer_type_node, |
| + tree_cons (NULL_TREE, integer_type_node, |
| + int_endlink))); |
| + |
| + /* void func (int, int, long long) */ |
| + void_ftype_int_int_longlong |
| + = build_function_type (void_type_node, |
| + tree_cons (NULL_TREE, integer_type_node, |
| + tree_cons (NULL_TREE, integer_type_node, |
| + longlong_endlink))); |
| + |
| + /* void func (int, int, int, int, int) */ |
| + void_ftype_int_int_int_int_int |
| + = build_function_type (void_type_node, |
| + tree_cons (NULL_TREE, integer_type_node, |
| + tree_cons (NULL_TREE, integer_type_node, |
| + tree_cons (NULL_TREE, |
| + integer_type_node, |
| + tree_cons |
| + (NULL_TREE, |
| + integer_type_node, |
| + int_endlink))))); |
| + |
| + /* void func (void *, int) */ |
| + void_ftype_ptr_int |
| + = build_function_type (void_type_node, |
| + tree_cons (NULL_TREE, ptr_type_node, int_endlink)); |
| + |
| + /* void func (int) */ |
| + void_ftype_int = build_function_type (void_type_node, int_endlink); |
| + |
| + /* void func (void) */ |
| + void_ftype_void = build_function_type (void_type_node, void_endlink); |
| + |
| + /* int func (void) */ |
| + int_ftype_void = build_function_type (integer_type_node, void_endlink); |
| + |
| + /* int func (void *, int) */ |
| + int_ftype_ptr_int |
| + = build_function_type (integer_type_node, |
| + tree_cons (NULL_TREE, ptr_type_node, int_endlink)); |
| + |
| + /* int func (int, int, int) */ |
| + int_ftype_int_int_int |
| + = build_function_type (integer_type_node, |
| + tree_cons (NULL_TREE, integer_type_node, |
| + tree_cons (NULL_TREE, integer_type_node, |
| + int_endlink))); |
| + |
| + /* Initialize avr32 builtins. */ |
| + def_builtin ("__builtin_mfsr", int_ftype_int, AVR32_BUILTIN_MFSR); |
| + def_builtin ("__builtin_mtsr", void_ftype_int_int, AVR32_BUILTIN_MTSR); |
| + def_builtin ("__builtin_mfdr", int_ftype_int, AVR32_BUILTIN_MFDR); |
| + def_builtin ("__builtin_mtdr", void_ftype_int_int, AVR32_BUILTIN_MTDR); |
| + def_builtin ("__builtin_cache", void_ftype_ptr_int, AVR32_BUILTIN_CACHE); |
| + def_builtin ("__builtin_sync", void_ftype_int, AVR32_BUILTIN_SYNC); |
| + def_builtin ("__builtin_ssrf", void_ftype_int, AVR32_BUILTIN_SSRF); |
| + def_builtin ("__builtin_csrf", void_ftype_int, AVR32_BUILTIN_CSRF); |
| + def_builtin ("__builtin_tlbr", void_ftype_void, AVR32_BUILTIN_TLBR); |
| + def_builtin ("__builtin_tlbs", void_ftype_void, AVR32_BUILTIN_TLBS); |
| + def_builtin ("__builtin_tlbw", void_ftype_void, AVR32_BUILTIN_TLBW); |
| + def_builtin ("__builtin_breakpoint", void_ftype_void, |
| + AVR32_BUILTIN_BREAKPOINT); |
| + def_builtin ("__builtin_xchg", int_ftype_ptr_int, AVR32_BUILTIN_XCHG); |
| + def_builtin ("__builtin_ldxi", int_ftype_ptr_int, AVR32_BUILTIN_LDXI); |
| + def_builtin ("__builtin_bswap_16", short_ftype_short, |
| + AVR32_BUILTIN_BSWAP16); |
| + def_builtin ("__builtin_bswap_32", int_ftype_int, AVR32_BUILTIN_BSWAP32); |
| + def_builtin ("__builtin_cop", void_ftype_int_int_int_int_int, |
| + AVR32_BUILTIN_COP); |
| + def_builtin ("__builtin_mvcr_w", int_ftype_int_int, AVR32_BUILTIN_MVCR_W); |
| + def_builtin ("__builtin_mvrc_w", void_ftype_int_int_int, |
| + AVR32_BUILTIN_MVRC_W); |
| + def_builtin ("__builtin_mvcr_d", longlong_ftype_int_int, |
| + AVR32_BUILTIN_MVCR_D); |
| + def_builtin ("__builtin_mvrc_d", void_ftype_int_int_longlong, |
| + AVR32_BUILTIN_MVRC_D); |
| + def_builtin ("__builtin_sats", int_ftype_int_int_int, AVR32_BUILTIN_SATS); |
| + def_builtin ("__builtin_satu", int_ftype_int_int_int, AVR32_BUILTIN_SATU); |
| + def_builtin ("__builtin_satrnds", int_ftype_int_int_int, |
| + AVR32_BUILTIN_SATRNDS); |
| + def_builtin ("__builtin_satrndu", int_ftype_int_int_int, |
| + AVR32_BUILTIN_SATRNDU); |
| + def_builtin ("__builtin_musfr", void_ftype_int, AVR32_BUILTIN_MUSFR); |
| + def_builtin ("__builtin_mustr", int_ftype_void, AVR32_BUILTIN_MUSTR); |
| + def_builtin ("__builtin_macsathh_w", int_ftype_int_short_short, |
| + AVR32_BUILTIN_MACSATHH_W); |
| + def_builtin ("__builtin_macwh_d", longlong_ftype_longlong_int_short, |
| + AVR32_BUILTIN_MACWH_D); |
| + def_builtin ("__builtin_machh_d", longlong_ftype_longlong_short_short, |
| + AVR32_BUILTIN_MACHH_D); |
| + def_builtin ("__builtin_mems", void_ftype_ptr_int, AVR32_BUILTIN_MEMS); |
| + def_builtin ("__builtin_memt", void_ftype_ptr_int, AVR32_BUILTIN_MEMT); |
| + def_builtin ("__builtin_memc", void_ftype_ptr_int, AVR32_BUILTIN_MEMC); |
| + |
| + /* Add all builtins that are more or less simple operations on two |
| + operands. */ |
| + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++) |
| + { |
| + /* Use one of the operands; the target can have a different mode for |
| + mask-generating compares. */ |
| + |
| + if (d->name == 0) |
| + continue; |
| + |
| + def_mbuiltin (d->mask, d->name, *(d->ftype), d->code); |
| + } |
| +} |
| + |
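| +/* Example use of the builtins defined above (illustrative; system-register |
| + offset 0 is the status register SR): |
| + int sr = __builtin_mfsr (0); |
| + __builtin_mtsr (0, sr); |
| + short swapped = __builtin_bswap_16 ((short) 0x1234); */ |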
| + |
| +/* Subroutine of avr32_expand_builtin to take care of binop insns. */ |
| + |
| +static rtx |
| +avr32_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target) |
| +{ |
| + rtx pat; |
| + tree arg0 = TREE_VALUE (arglist); |
| + tree arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| + rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| + rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| + enum machine_mode tmode = insn_data[icode].operand[0].mode; |
| + enum machine_mode mode0 = insn_data[icode].operand[1].mode; |
| + enum machine_mode mode1 = insn_data[icode].operand[2].mode; |
| + |
| + if (!target |
| + || GET_MODE (target) != tmode |
| + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| + target = gen_reg_rtx (tmode); |
| + |
| + /* If the insn wants input operands in modes different from the |
| + result, convert or copy them into the expected modes. */ |
| + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0)) |
| + { |
| + /* If op0 is already a reg we must cast it to the correct mode. */ |
| + if (REG_P (op0)) |
| + op0 = convert_to_mode (mode0, op0, 1); |
| + else |
| + op0 = copy_to_mode_reg (mode0, op0); |
| + } |
| + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1)) |
| + { |
| + /* If op1 is already a reg we must cast it to the correct mode. */ |
| + if (REG_P (op1)) |
| + op1 = convert_to_mode (mode1, op1, 1); |
| + else |
| + op1 = copy_to_mode_reg (mode1, op1); |
| + } |
| + pat = GEN_FCN (icode) (target, op0, op1); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + return target; |
| +} |
| + |
| +/* Expand an expression EXP that calls a built-in function, |
| + with result going to TARGET if that's convenient |
| + (and in mode MODE if that's convenient). |
| + SUBTARGET may be used as the target for computing one of EXP's operands. |
| + IGNORE is nonzero if the value is to be ignored. */ |
| + |
| +rtx |
| +avr32_expand_builtin (tree exp, |
| + rtx target, |
| + rtx subtarget ATTRIBUTE_UNUSED, |
| + enum machine_mode mode ATTRIBUTE_UNUSED, |
| + int ignore ATTRIBUTE_UNUSED) |
| +{ |
| + const struct builtin_description *d; |
| + unsigned int i; |
| + enum insn_code icode = 0; |
| + tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0); |
| + tree arglist = TREE_OPERAND (exp, 1); |
| + tree arg0, arg1, arg2; |
| + rtx op0, op1, op2, pat; |
| + enum machine_mode tmode, mode0, mode1; |
| + enum machine_mode arg0_mode; |
| + int fcode = DECL_FUNCTION_CODE (fndecl); |
| + |
| + switch (fcode) |
| + { |
| + default: |
| + break; |
| + |
| + case AVR32_BUILTIN_SATS: |
| + case AVR32_BUILTIN_SATU: |
| + case AVR32_BUILTIN_SATRNDS: |
| + case AVR32_BUILTIN_SATRNDU: |
| + { |
| + const char *fname; |
| + switch (fcode) |
| + { |
| + default: |
| + case AVR32_BUILTIN_SATS: |
| + icode = CODE_FOR_sats; |
| + fname = "sats"; |
| + break; |
| + case AVR32_BUILTIN_SATU: |
| + icode = CODE_FOR_satu; |
| + fname = "satu"; |
| + break; |
| + case AVR32_BUILTIN_SATRNDS: |
| + icode = CODE_FOR_satrnds; |
| + fname = "satrnds"; |
| + break; |
| + case AVR32_BUILTIN_SATRNDU: |
| + icode = CODE_FOR_satrndu; |
| + fname = "satrndu"; |
| + break; |
| + } |
| + |
| + arg0 = TREE_VALUE (arglist); |
| + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); |
| + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); |
| + |
| + tmode = insn_data[icode].operand[0].mode; |
| + |
| + |
| + if (target == 0 |
| + || GET_MODE (target) != tmode |
| + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| + target = gen_reg_rtx (tmode); |
| + |
| + |
| + if (!(*insn_data[icode].operand[0].predicate) (op0, GET_MODE (op0))) |
| + { |
| + op0 = copy_to_mode_reg (insn_data[icode].operand[0].mode, op0); |
| + } |
| + |
| + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode)) |
| + { |
| + error ("Parameter 2 to __builtin_%s should be a constant number.", |
| + fname); |
| + return NULL_RTX; |
| + } |
| + |
| + if (!(*insn_data[icode].operand[1].predicate) (op2, SImode)) |
| + { |
| + error ("Parameter 3 to __builtin_%s should be a constant number.", |
| + fname); |
| + return NULL_RTX; |
| + } |
| + |
| + emit_move_insn (target, op0); |
| + pat = GEN_FCN (icode) (target, op1, op2); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + |
| + return target; |
| + } |
| + case AVR32_BUILTIN_MUSTR: |
| + icode = CODE_FOR_mustr; |
| + tmode = insn_data[icode].operand[0].mode; |
| + |
| + if (target == 0 |
| + || GET_MODE (target) != tmode |
| + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| + target = gen_reg_rtx (tmode); |
| + pat = GEN_FCN (icode) (target); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + return target; |
| + |
| + case AVR32_BUILTIN_MFSR: |
| + icode = CODE_FOR_mfsr; |
| + arg0 = TREE_VALUE (arglist); |
| + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| + tmode = insn_data[icode].operand[0].mode; |
| + mode0 = insn_data[icode].operand[1].mode; |
| + |
| + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0)) |
| + { |
| + error ("Parameter 1 to __builtin_mfsr must be a constant number"); |
| + } |
| + |
| + if (target == 0 |
| + || GET_MODE (target) != tmode |
| + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| + target = gen_reg_rtx (tmode); |
| + pat = GEN_FCN (icode) (target, op0); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + return target; |
| + case AVR32_BUILTIN_MTSR: |
| + icode = CODE_FOR_mtsr; |
| + arg0 = TREE_VALUE (arglist); |
| + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| + mode0 = insn_data[icode].operand[0].mode; |
| + mode1 = insn_data[icode].operand[1].mode; |
| + |
| + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0)) |
| + { |
| + error ("Parameter 1 to __builtin_mtsr must be a constant number"); |
| + return gen_reg_rtx (mode0); |
| + } |
| + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1)) |
| + op1 = copy_to_mode_reg (mode1, op1); |
| + pat = GEN_FCN (icode) (op0, op1); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + return NULL_RTX; |
| + case AVR32_BUILTIN_MFDR: |
| + icode = CODE_FOR_mfdr; |
| + arg0 = TREE_VALUE (arglist); |
| + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| + tmode = insn_data[icode].operand[0].mode; |
| + mode0 = insn_data[icode].operand[1].mode; |
| + |
| + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0)) |
| + { |
| + error ("Parameter 1 to __builtin_mfdr must be a constant number"); |
| + } |
| + |
| + if (target == 0 |
| + || GET_MODE (target) != tmode |
| + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| + target = gen_reg_rtx (tmode); |
| + pat = GEN_FCN (icode) (target, op0); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + return target; |
| + case AVR32_BUILTIN_MTDR: |
| + icode = CODE_FOR_mtdr; |
| + arg0 = TREE_VALUE (arglist); |
| + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| + mode0 = insn_data[icode].operand[0].mode; |
| + mode1 = insn_data[icode].operand[1].mode; |
| + |
| + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0)) |
| + { |
| + error ("Parameter 1 to __builtin_mtdr must be a constant number"); |
| + return gen_reg_rtx (mode0); |
| + } |
| + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1)) |
| + op1 = copy_to_mode_reg (mode1, op1); |
| + pat = GEN_FCN (icode) (op0, op1); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + return NULL_RTX; |
| + case AVR32_BUILTIN_CACHE: |
| + icode = CODE_FOR_cache; |
| + arg0 = TREE_VALUE (arglist); |
| + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| + mode0 = insn_data[icode].operand[0].mode; |
| + mode1 = insn_data[icode].operand[1].mode; |
| + |
| + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1)) |
| + { |
| + error ("Parameter 2 to __builtin_cache must be a constant number"); |
| + return gen_reg_rtx (mode1); |
| + } |
| + |
| + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0)) |
| + op0 = copy_to_mode_reg (mode0, op0); |
| + |
| + pat = GEN_FCN (icode) (op0, op1); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + return NULL_RTX; |
| + case AVR32_BUILTIN_SYNC: |
| + case AVR32_BUILTIN_MUSFR: |
| + case AVR32_BUILTIN_SSRF: |
| + case AVR32_BUILTIN_CSRF: |
| + { |
| + const char *fname; |
| + switch (fcode) |
| + { |
| + default: |
| + case AVR32_BUILTIN_SYNC: |
| + icode = CODE_FOR_sync; |
| + fname = "sync"; |
| + break; |
| + case AVR32_BUILTIN_MUSFR: |
| + icode = CODE_FOR_musfr; |
| + fname = "musfr"; |
| + break; |
| + case AVR32_BUILTIN_SSRF: |
| + icode = CODE_FOR_ssrf; |
| + fname = "ssrf"; |
| + break; |
| + case AVR32_BUILTIN_CSRF: |
| + icode = CODE_FOR_csrf; |
| + fname = "csrf"; |
| + break; |
| + } |
| + |
| + arg0 = TREE_VALUE (arglist); |
| + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| + mode0 = insn_data[icode].operand[0].mode; |
| + |
| + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0)) |
| + { |
| + if (icode == CODE_FOR_musfr) |
| + op0 = copy_to_mode_reg (mode0, op0); |
| + else |
| + { |
| + error ("Parameter to __builtin_%s is illegal.", fname); |
| + return gen_reg_rtx (mode0); |
| + } |
| + } |
| + pat = GEN_FCN (icode) (op0); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + return NULL_RTX; |
| + } |
| + case AVR32_BUILTIN_TLBR: |
| + icode = CODE_FOR_tlbr; |
| + pat = GEN_FCN (icode) (NULL_RTX); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + return NULL_RTX; |
| + case AVR32_BUILTIN_TLBS: |
| + icode = CODE_FOR_tlbs; |
| + pat = GEN_FCN (icode) (NULL_RTX); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + return NULL_RTX; |
| + case AVR32_BUILTIN_TLBW: |
| + icode = CODE_FOR_tlbw; |
| + pat = GEN_FCN (icode) (NULL_RTX); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + return NULL_RTX; |
| + case AVR32_BUILTIN_BREAKPOINT: |
| + icode = CODE_FOR_breakpoint; |
| + pat = GEN_FCN (icode) (NULL_RTX); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + return NULL_RTX; |
| + case AVR32_BUILTIN_XCHG: |
| + icode = CODE_FOR_sync_lock_test_and_setsi; |
| + arg0 = TREE_VALUE (arglist); |
| + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| + tmode = insn_data[icode].operand[0].mode; |
| + mode0 = insn_data[icode].operand[1].mode; |
| + mode1 = insn_data[icode].operand[2].mode; |
| + |
| + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1)) |
| + { |
| + op1 = copy_to_mode_reg (mode1, op1); |
| + } |
| + |
| + op0 = force_reg (GET_MODE (op0), op0); |
| + op0 = gen_rtx_MEM (GET_MODE (op0), op0); |
| + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0)) |
| + { |
| + error |
| + ("Parameter 1 to __builtin_xchg must be a pointer to an integer."); |
| + } |
| + |
| + if (target == 0 |
| + || GET_MODE (target) != tmode |
| + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| + target = gen_reg_rtx (tmode); |
| + pat = GEN_FCN (icode) (target, op0, op1); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + return target; |
| + case AVR32_BUILTIN_LDXI: |
| + icode = CODE_FOR_ldxi; |
| + arg0 = TREE_VALUE (arglist); |
| + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); |
| + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); |
| + tmode = insn_data[icode].operand[0].mode; |
| + mode0 = insn_data[icode].operand[1].mode; |
| + mode1 = insn_data[icode].operand[2].mode; |
| + |
| + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0)) |
| + { |
| + op0 = copy_to_mode_reg (mode0, op0); |
| + } |
| + |
| + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1)) |
| + { |
| + op1 = copy_to_mode_reg (mode1, op1); |
| + } |
| + |
| + if (!(*insn_data[icode].operand[3].predicate) (op2, SImode)) |
| + { |
| + error |
| + ("Parameter 3 to __builtin_ldxi must be a valid extract shift operand: (0|8|16|24)"); |
| + return gen_reg_rtx (mode0); |
| + } |
| + |
| + if (target == 0 |
| + || GET_MODE (target) != tmode |
| + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| + target = gen_reg_rtx (tmode); |
| + pat = GEN_FCN (icode) (target, op0, op1, op2); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + return target; |
| + case AVR32_BUILTIN_BSWAP16: |
| + { |
| + icode = CODE_FOR_bswap_16; |
| + arg0 = TREE_VALUE (arglist); |
| + arg0_mode = TYPE_MODE (TREE_TYPE (arg0)); |
| + mode0 = insn_data[icode].operand[1].mode; |
| + if (arg0_mode != mode0) |
| + arg0 = build1 (NOP_EXPR, |
| + (*lang_hooks.types.type_for_mode) (mode0, 0), arg0); |
| + |
| + op0 = expand_expr (arg0, NULL_RTX, HImode, 0); |
| + tmode = insn_data[icode].operand[0].mode; |
| + |
| + |
| + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0)) |
| + { |
| + if ( CONST_INT_P (op0) ) |
| + { |
| + HOST_WIDE_INT val = ( ((INTVAL (op0)&0x00ff) << 8) | |
| + ((INTVAL (op0)&0xff00) >> 8) ); |
| + /* Sign extend 16-bit value to host wide int */ |
| + val <<= (HOST_BITS_PER_WIDE_INT - 16); |
| + val >>= (HOST_BITS_PER_WIDE_INT - 16); |
| + op0 = GEN_INT(val); |
| + if (target == 0 |
| + || GET_MODE (target) != tmode |
| + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| + target = gen_reg_rtx (tmode); |
| + emit_move_insn(target, op0); |
| + return target; |
| + } |
| + else |
| + op0 = copy_to_mode_reg (mode0, op0); |
| + } |
| + |
| + if (target == 0 |
| + || GET_MODE (target) != tmode |
| + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| + { |
| + target = gen_reg_rtx (tmode); |
| + } |
| + |
| + |
| + pat = GEN_FCN (icode) (target, op0); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + |
| + return target; |
| + } |
| + case AVR32_BUILTIN_BSWAP32: |
| + { |
| + icode = CODE_FOR_bswap_32; |
| + arg0 = TREE_VALUE (arglist); |
| + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| + tmode = insn_data[icode].operand[0].mode; |
| + mode0 = insn_data[icode].operand[1].mode; |
| + |
| + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0)) |
| + { |
| + if ( CONST_INT_P (op0) ) |
| + { |
| + HOST_WIDE_INT val = ( ((INTVAL (op0)&0x000000ff) << 24) | |
| + ((INTVAL (op0)&0x0000ff00) << 8) | |
| + ((INTVAL (op0)&0x00ff0000) >> 8) | |
| + ((INTVAL (op0)&0xff000000) >> 24) ); |
| + /* Sign extend 32-bit value to host wide int */ |
| + val <<= (HOST_BITS_PER_WIDE_INT - 32); |
| + val >>= (HOST_BITS_PER_WIDE_INT - 32); |
| + op0 = GEN_INT(val); |
| + if (target == 0 |
| + || GET_MODE (target) != tmode |
| + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| + target = gen_reg_rtx (tmode); |
| + emit_move_insn(target, op0); |
| + return target; |
| + } |
| + else |
| + op0 = copy_to_mode_reg (mode0, op0); |
| + } |
| + |
| + if (target == 0 |
| + || GET_MODE (target) != tmode |
| + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| + target = gen_reg_rtx (tmode); |
| + |
| + |
| + pat = GEN_FCN (icode) (target, op0); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + |
| + return target; |
| + } |
| + case AVR32_BUILTIN_MVCR_W: |
| + case AVR32_BUILTIN_MVCR_D: |
| + { |
| + arg0 = TREE_VALUE (arglist); |
| + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| + |
| + if (fcode == AVR32_BUILTIN_MVCR_W) |
| + icode = CODE_FOR_mvcrsi; |
| + else |
| + icode = CODE_FOR_mvcrdi; |
| + |
| + tmode = insn_data[icode].operand[0].mode; |
| + |
| + if (target == 0 |
| + || GET_MODE (target) != tmode |
| + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| + target = gen_reg_rtx (tmode); |
| + |
| + if (!(*insn_data[icode].operand[1].predicate) (op0, SImode)) |
| + { |
| + error ("Parameter 1 is not a valid coprocessor number."); |
| + error ("Number should be between 0 and 7."); |
| + return NULL_RTX; |
| + } |
| + |
| + if (!(*insn_data[icode].operand[2].predicate) (op1, SImode)) |
| + { |
| + error ("Parameter 2 is not a valid coprocessor register number."); |
| + error ("Number should be between 0 and 15."); |
| + return NULL_RTX; |
| + } |
| + |
| + pat = GEN_FCN (icode) (target, op0, op1); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + |
| + return target; |
| + } |
| + case AVR32_BUILTIN_MACSATHH_W: |
| + case AVR32_BUILTIN_MACWH_D: |
| + case AVR32_BUILTIN_MACHH_D: |
| + { |
| + arg0 = TREE_VALUE (arglist); |
| + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); |
| + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); |
| + |
| + icode = ((fcode == AVR32_BUILTIN_MACSATHH_W) ? CODE_FOR_macsathh_w : |
| + (fcode == AVR32_BUILTIN_MACWH_D) ? CODE_FOR_macwh_d : |
| + CODE_FOR_machh_d); |
| + |
| + tmode = insn_data[icode].operand[0].mode; |
| + mode0 = insn_data[icode].operand[1].mode; |
| + mode1 = insn_data[icode].operand[2].mode; |
| + |
| + |
| + if (!target |
| + || GET_MODE (target) != tmode |
| + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) |
| + target = gen_reg_rtx (tmode); |
| + |
| + if (!(*insn_data[icode].operand[0].predicate) (op0, tmode)) |
| + { |
| + /* If op0 is already a reg we must cast it to the correct mode. */ |
| + if (REG_P (op0)) |
| + op0 = convert_to_mode (tmode, op0, 1); |
| + else |
| + op0 = copy_to_mode_reg (tmode, op0); |
| + } |
| + |
| + if (!(*insn_data[icode].operand[1].predicate) (op1, mode0)) |
| + { |
| + /* If op1 is already a reg we must cast it to the correct mode. */ |
| + if (REG_P (op1)) |
| + op1 = convert_to_mode (mode0, op1, 1); |
| + else |
| + op1 = copy_to_mode_reg (mode0, op1); |
| + } |
| + |
| + if (!(*insn_data[icode].operand[2].predicate) (op2, mode1)) |
| + { |
| + /* If op2 is already a reg we must cast it to the correct mode. */ |
| + if (REG_P (op2)) |
| + op2 = convert_to_mode (mode1, op2, 1); |
| + else |
| + op2 = copy_to_mode_reg (mode1, op2); |
| + } |
| + |
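| + /* The MAC insns accumulate into operand 0, so first load the |
| + accumulator value (op0) into the target register. */ |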
| + emit_move_insn (target, op0); |
| + |
| + pat = GEN_FCN (icode) (target, op1, op2); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + return target; |
| + } |
| + case AVR32_BUILTIN_MVRC_W: |
| + case AVR32_BUILTIN_MVRC_D: |
| + { |
| + arg0 = TREE_VALUE (arglist); |
| + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); |
| + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); |
| + |
| + if (fcode == AVR32_BUILTIN_MVRC_W) |
| + icode = CODE_FOR_mvrcsi; |
| + else |
| + icode = CODE_FOR_mvrcdi; |
| + |
| + if (!(*insn_data[icode].operand[0].predicate) (op0, SImode)) |
| + { |
| + error ("Parameter 1 is not a valid coprocessor number."); |
| + error ("Number should be between 0 and 7."); |
| + return NULL_RTX; |
| + } |
| + |
| + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode)) |
| + { |
| + error ("Parameter 2 is not a valid coprocessor register number."); |
| + error ("Number should be between 0 and 15."); |
| + return NULL_RTX; |
| + } |
| + |
| + if (GET_CODE (op2) == CONST_INT |
| + || GET_CODE (op2) == CONST |
| + || GET_CODE (op2) == SYMBOL_REF || GET_CODE (op2) == LABEL_REF) |
| + { |
| + op2 = force_const_mem (insn_data[icode].operand[2].mode, op2); |
| + } |
| + |
| + if (!(*insn_data[icode].operand[2].predicate) (op2, GET_MODE (op2))) |
| + op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2); |
| + |
| + |
| + pat = GEN_FCN (icode) (op0, op1, op2); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + |
| + return NULL_RTX; |
| + } |
| + case AVR32_BUILTIN_COP: |
| + { |
| + rtx op3, op4; |
| + tree arg3, arg4; |
| + icode = CODE_FOR_cop; |
| + arg0 = TREE_VALUE (arglist); |
| + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); |
| + arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist)))); |
| + arg4 = |
| + TREE_VALUE (TREE_CHAIN |
| + (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))))); |
| + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); |
| + op3 = expand_expr (arg3, NULL_RTX, VOIDmode, 0); |
| + op4 = expand_expr (arg4, NULL_RTX, VOIDmode, 0); |
| + |
| + if (!(*insn_data[icode].operand[0].predicate) (op0, SImode)) |
| + { |
| + error |
| + ("Parameter 1 to __builtin_cop is not a valid coprocessor number."); |
| + error ("Number should be between 0 and 7."); |
| + return NULL_RTX; |
| + } |
| + |
| + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode)) |
| + { |
| + error |
| + ("Parameter 2 to __builtin_cop is not a valid coprocessor register number."); |
| + error ("Number should be between 0 and 15."); |
| + return NULL_RTX; |
| + } |
| + |
| + if (!(*insn_data[icode].operand[2].predicate) (op2, SImode)) |
| + { |
| + error |
| + ("Parameter 3 to __builtin_cop is not a valid coprocessor register number."); |
| + error ("Number should be between 0 and 15."); |
| + return NULL_RTX; |
| + } |
| + |
| + if (!(*insn_data[icode].operand[3].predicate) (op3, SImode)) |
| + { |
| + error |
| + ("Parameter 4 to __builtin_cop is not a valid coprocessor register number."); |
| + error ("Number should be between 0 and 15."); |
| + return NULL_RTX; |
| + } |
| + |
| + if (!(*insn_data[icode].operand[4].predicate) (op4, SImode)) |
| + { |
| + error |
| + ("Parameter 5 to __builtin_cop is not a valid coprocessor operation."); |
| + error ("Number should be between 0 and 127."); |
| + return NULL_RTX; |
| + } |
| + |
| + pat = GEN_FCN (icode) (op0, op1, op2, op3, op4); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + |
| + return target; |
| + } |
| + case AVR32_BUILTIN_MEMS: |
| + case AVR32_BUILTIN_MEMC: |
| + case AVR32_BUILTIN_MEMT: |
| + { |
| + if (!TARGET_RMW) |
| + error ("Trying to use __builtin_mem(s/c/t) when target does not support RMW insns."); |
| + |
| + switch (fcode) { |
| + case AVR32_BUILTIN_MEMS: |
| + icode = CODE_FOR_iorsi3; |
| + break; |
| + case AVR32_BUILTIN_MEMC: |
| + icode = CODE_FOR_andsi3; |
| + break; |
| + case AVR32_BUILTIN_MEMT: |
| + icode = CODE_FOR_xorsi3; |
| + break; |
| + } |
| + |
| + arg0 = TREE_VALUE (arglist); |
| + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); |
| + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); |
| + if ( GET_CODE (op0) == SYMBOL_REF ) |
| + /* This symbol must be RMW addressable. */ |
| + SYMBOL_REF_FLAGS (op0) |= (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT); |
| + op0 = gen_rtx_MEM(SImode, op0); |
| + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); |
| + mode0 = insn_data[icode].operand[1].mode; |
| + |
| + |
| + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0)) |
| + { |
| + error ("Parameter 1 to __builtin_mem(s/c/t) must be a Ks15<<2 address or a rmw addressable symbol."); |
| + } |
| + |
| + if ( !CONST_INT_P (op1) |
| + || INTVAL (op1) > 31 |
| + || INTVAL (op1) < 0 ) |
| + error ("Parameter 2 to __builtin_mem(s/c/t) must be a constant between 0 and 31."); |
| + |
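| + /* __builtin_memc clears a bit, so the and-mask has every bit except |
| + the selected one set; __builtin_mems/__builtin_memt use a mask |
| + with only the selected bit set for the or/xor. */ |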
| + if ( fcode == AVR32_BUILTIN_MEMC ) |
| + op1 = GEN_INT((~(1 << INTVAL(op1)))&0xffffffff); |
| + else |
| + op1 = GEN_INT((1 << INTVAL(op1))&0xffffffff); |
| + pat = GEN_FCN (icode) (op0, op0, op1); |
| + if (!pat) |
| + return 0; |
| + emit_insn (pat); |
| + return op0; |
| + } |
| + |
| + } |
| + |
| + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++) |
| + if (d->code == fcode) |
| + return avr32_expand_binop_builtin (d->icode, arglist, target); |
| + |
| + |
| + /* @@@ Should really do something sensible here. */ |
| + return NULL_RTX; |
| +} |
| + |
| + |
| +/* Handle an "interrupt" or "isr" attribute; |
| + arguments as in struct attribute_spec.handler. */ |
| + |
| +static tree |
| +avr32_handle_isr_attribute (tree * node, tree name, tree args, |
| + int flags, bool * no_add_attrs) |
| +{ |
| + if (DECL_P (*node)) |
| + { |
| + if (TREE_CODE (*node) != FUNCTION_DECL) |
| + { |
| + warning (OPT_Wattributes,"`%s' attribute only applies to functions", |
| + IDENTIFIER_POINTER (name)); |
| + *no_add_attrs = true; |
| + } |
| + /* FIXME: the argument if any is checked for type attributes; should it |
| + be checked for decl ones? */ |
| + } |
| + else |
| + { |
| + if (TREE_CODE (*node) == FUNCTION_TYPE |
| + || TREE_CODE (*node) == METHOD_TYPE) |
| + { |
| + if (avr32_isr_value (args) == AVR32_FT_UNKNOWN) |
| + { |
| + warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name)); |
| + *no_add_attrs = true; |
| + } |
| + } |
| + else if (TREE_CODE (*node) == POINTER_TYPE |
| + && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE |
| + || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE) |
| + && avr32_isr_value (args) != AVR32_FT_UNKNOWN) |
| + { |
| + *node = build_variant_type_copy (*node); |
| + TREE_TYPE (*node) = build_type_attribute_variant |
| + (TREE_TYPE (*node), |
| + tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node)))); |
| + *no_add_attrs = true; |
| + } |
| + else |
| + { |
| + /* Possibly pass this attribute on from the type to a decl. */ |
| + if (flags & ((int) ATTR_FLAG_DECL_NEXT |
| + | (int) ATTR_FLAG_FUNCTION_NEXT |
| + | (int) ATTR_FLAG_ARRAY_NEXT)) |
| + { |
| + *no_add_attrs = true; |
| + return tree_cons (name, args, NULL_TREE); |
| + } |
| + else |
| + { |
| + warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name)); |
| + } |
| + } |
| + } |
| + |
| + return NULL_TREE; |
| +} |
| + |
| +/* Handle an attribute requiring a FUNCTION_DECL; |
| + arguments as in struct attribute_spec.handler. */ |
| +static tree |
| +avr32_handle_fndecl_attribute (tree * node, tree name, |
| + tree args ATTRIBUTE_UNUSED, |
| + int flags ATTRIBUTE_UNUSED, |
| + bool * no_add_attrs) |
| +{ |
| + if (TREE_CODE (*node) != FUNCTION_DECL) |
| + { |
| + warning (OPT_Wattributes,"%qs attribute only applies to functions", |
| + IDENTIFIER_POINTER (name)); |
| + *no_add_attrs = true; |
| + } |
| + |
| + return NULL_TREE; |
| +} |
| + |
| + |
| +/* Handle an acall attribute; |
| + arguments as in struct attribute_spec.handler. */ |
| + |
| +static tree |
| +avr32_handle_acall_attribute (tree * node, tree name, |
| + tree args ATTRIBUTE_UNUSED, |
| + int flags ATTRIBUTE_UNUSED, bool * no_add_attrs) |
| +{ |
| + if (TREE_CODE (*node) == FUNCTION_TYPE || TREE_CODE (*node) == METHOD_TYPE) |
| + { |
| + warning (OPT_Wattributes,"`%s' attribute not yet supported...", |
| + IDENTIFIER_POINTER (name)); |
| + *no_add_attrs = true; |
| + return NULL_TREE; |
| + } |
| + |
| + warning (OPT_Wattributes,"`%s' attribute only applies to functions", |
| + IDENTIFIER_POINTER (name)); |
| + *no_add_attrs = true; |
| + return NULL_TREE; |
| +} |
| + |
| + |
| +/* Return 0 if the attributes for two types are incompatible, 1 if they |
| + are compatible, and 2 if they are nearly compatible (which causes a |
| + warning to be generated). */ |
| + |
| +static int |
| +avr32_comp_type_attributes (tree type1, tree type2) |
| +{ |
| + int acall1, acall2, isr1, isr2, naked1, naked2; |
| + |
| + /* Check for mismatch of non-default calling convention. */ |
| + if (TREE_CODE (type1) != FUNCTION_TYPE) |
| + return 1; |
| + |
| + /* Check for mismatched call attributes. */ |
| + acall1 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type1)) != NULL; |
| + acall2 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type2)) != NULL; |
| + naked1 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type1)) != NULL; |
| + naked2 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type2)) != NULL; |
| + isr1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL; |
| + if (!isr1) |
| + isr1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL; |
| + |
| + isr2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL; |
| + if (!isr2) |
| + isr2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL; |
| + |
| + if ((acall1 && isr2) |
| + || (acall2 && isr1) || (naked1 && isr2) || (naked2 && isr1)) |
| + return 0; |
| + |
| + return 1; |
| +} |
| + |
| + |
| +/* Computes the type of the current function. */ |
| + |
| +static unsigned long |
| +avr32_compute_func_type (void) |
| +{ |
| + unsigned long type = AVR32_FT_UNKNOWN; |
| + tree a; |
| + tree attr; |
| + |
| + if (TREE_CODE (current_function_decl) != FUNCTION_DECL) |
| + abort (); |
| + |
| + /* Decide if the current function is volatile. Such functions never |
| + return, and many memory cycles can be saved by not storing register |
| + values that will never be needed again. This optimization was added to |
| + speed up context switching in a kernel application. */ |
| + if (optimize > 0 |
| + && TREE_NOTHROW (current_function_decl) |
| + && TREE_THIS_VOLATILE (current_function_decl)) |
| + type |= AVR32_FT_VOLATILE; |
| + |
| + if (cfun->static_chain_decl != NULL) |
| + type |= AVR32_FT_NESTED; |
| + |
| + attr = DECL_ATTRIBUTES (current_function_decl); |
| + |
| + a = lookup_attribute ("isr", attr); |
| + if (a == NULL_TREE) |
| + a = lookup_attribute ("interrupt", attr); |
| + |
| + if (a == NULL_TREE) |
| + type |= AVR32_FT_NORMAL; |
| + else |
| + type |= avr32_isr_value (TREE_VALUE (a)); |
| + |
| + |
| + a = lookup_attribute ("acall", attr); |
| + if (a != NULL_TREE) |
| + type |= AVR32_FT_ACALL; |
| + |
| + a = lookup_attribute ("naked", attr); |
| + if (a != NULL_TREE) |
| + type |= AVR32_FT_NAKED; |
| + |
| + return type; |
| +} |
| + |
| +/* Returns the type of the current function. */ |
| + |
| +static unsigned long |
| +avr32_current_func_type (void) |
| +{ |
| + if (AVR32_FUNC_TYPE (cfun->machine->func_type) == AVR32_FT_UNKNOWN) |
| + cfun->machine->func_type = avr32_compute_func_type (); |
| + |
| + return cfun->machine->func_type; |
| +} |
| + |
| +/* |
| + This target hook should return true if we should not pass type solely |
| + in registers. The file expr.h provides a definition that is usually |
| + appropriate; refer to expr.h for additional documentation. |
| +*/ |
| +bool |
| +avr32_must_pass_in_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type) |
| +{ |
| + if (type && AGGREGATE_TYPE_P (type) |
| + /* If the alignment is less than the size then pass in the struct on |
| + the stack. */ |
| + && ((unsigned int) TYPE_ALIGN_UNIT (type) < |
| + (unsigned int) int_size_in_bytes (type)) |
| + /* If we support unaligned word accesses then structs of size 4 and 8 |
| + can have any alignment and still be passed in registers. */ |
| + && !(TARGET_UNALIGNED_WORD |
| + && (int_size_in_bytes (type) == 4 |
| + || int_size_in_bytes (type) == 8)) |
| + /* Double word structs need only a word alignment. */ |
| + && !(int_size_in_bytes (type) == 8 && TYPE_ALIGN_UNIT (type) >= 4)) |
| + return true; |
| + |
| + if (type && AGGREGATE_TYPE_P (type) |
| + /* Structs of size 3, 5, 6 and 7 bytes are always passed on the stack. */ |
| + && (int_size_in_bytes (type) == 3 |
| + || int_size_in_bytes (type) == 5 |
| + || int_size_in_bytes (type) == 6 || int_size_in_bytes (type) == 7)) |
| + return true; |
| + |
| + |
| + return (type && TREE_ADDRESSABLE (type)); |
| +} |
| + |
| + |
| +bool |
| +avr32_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED) |
| +{ |
| + return true; |
| +} |
| + |
| +/* |
| + This target hook should return true if an argument at the position indicated |
| + by cum should be passed by reference. This predicate is queried after target |
| + independent reasons for being passed by reference, such as TREE_ADDRESSABLE (type). |
| + |
| + If the hook returns true, a copy of that argument is made in memory and a |
| + pointer to the argument is passed instead of the argument itself. The pointer |
| + is passed in whatever way is appropriate for passing a pointer to that type. |
| +*/ |
| +bool |
| +avr32_pass_by_reference (CUMULATIVE_ARGS * cum ATTRIBUTE_UNUSED, |
| + enum machine_mode mode ATTRIBUTE_UNUSED, |
| + tree type, bool named ATTRIBUTE_UNUSED) |
| +{ |
| + return (type && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)); |
| +} |
| + |
| +static int |
| +avr32_arg_partial_bytes (CUMULATIVE_ARGS * pcum ATTRIBUTE_UNUSED, |
| + enum machine_mode mode ATTRIBUTE_UNUSED, |
| + tree type ATTRIBUTE_UNUSED, |
| + bool named ATTRIBUTE_UNUSED) |
| +{ |
| + return 0; |
| +} |
| + |
| + |
| +struct gcc_target targetm = TARGET_INITIALIZER; |
| + |
| +/* |
| + Table used to convert from register number in the assembler instructions and |
| + the register numbers used in gcc. |
| +*/ |
| +const int avr32_function_arg_reglist[] = { |
| + INTERNAL_REGNUM (12), |
| + INTERNAL_REGNUM (11), |
| + INTERNAL_REGNUM (10), |
| + INTERNAL_REGNUM (9), |
| + INTERNAL_REGNUM (8) |
| +}; |
| + |
| +rtx avr32_compare_op0 = NULL_RTX; |
| +rtx avr32_compare_op1 = NULL_RTX; |
| +rtx avr32_compare_operator = NULL_RTX; |
| +rtx avr32_acc_cache = NULL_RTX; |
| + |
| +/* |
| + Returns nonzero if it is allowed to store a value of mode mode in hard |
| + register number regno. |
| +*/ |
| +int |
| +avr32_hard_regno_mode_ok (int regnr, enum machine_mode mode) |
| +{ |
| + /* We allow only float modes in the fp-registers */ |
| + if (regnr >= FIRST_FP_REGNUM |
| + && regnr <= LAST_FP_REGNUM && GET_MODE_CLASS (mode) != MODE_FLOAT) |
| + { |
| + return 0; |
| + } |
| + |
| + switch (mode) |
| + { |
| + case DImode: /* long long */ |
| + case DFmode: /* double */ |
| + case SCmode: /* __complex__ float */ |
| + case CSImode: /* __complex__ int */ |
| + if (regnr < 4) |
| + { /* long long int not supported in r12, sp, lr |
| + or pc. */ |
| + return 0; |
| + } |
| + else |
| + { |
| + if (regnr % 2) /* long long int has to be placed in |
| + even-numbered registers. */ |
| + return 0; |
| + else |
| + return 1; |
| + } |
| + case CDImode: /* __complex__ long long */ |
| + case DCmode: /* __complex__ double */ |
| + case TImode: /* 16 bytes */ |
| + if (regnr < 7) |
| + return 0; |
| + else if (regnr % 2) |
| + return 0; |
| + else |
| + return 1; |
| + default: |
| + return 1; |
| + } |
| +} |
| + |
| + |
| +int |
| +avr32_rnd_operands (rtx add, rtx shift) |
| +{ |
| + if (GET_CODE (shift) == CONST_INT && |
| + GET_CODE (add) == CONST_INT && INTVAL (shift) > 0) |
| + { |
| + if ((1 << (INTVAL (shift) - 1)) == INTVAL (add)) |
| + return TRUE; |
| + } |
| + |
| + return FALSE; |
| +} |
| + |
| + |
| + |
| +int |
| +avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c, const char *str) |
| +{ |
| + switch (c) |
| + { |
| + case 'K': |
| + case 'I': |
| + { |
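| + /* Constraints of the form Ks<N>/Ku<N> (and Is<N>/Iu<N> for negated |
| + values): parse the bit-width from the constraint string and check |
| + that the value fits in the signed or unsigned range. */ |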
| + HOST_WIDE_INT min_value = 0, max_value = 0; |
| + char size_str[3]; |
| + int const_size; |
| + |
| + size_str[0] = str[2]; |
| + size_str[1] = str[3]; |
| + size_str[2] = '\0'; |
| + const_size = atoi (size_str); |
| + |
| + if (toupper (str[1]) == 'U') |
| + { |
| + min_value = 0; |
| + max_value = (1 << const_size) - 1; |
| + } |
| + else if (toupper (str[1]) == 'S') |
| + { |
| + min_value = -(1 << (const_size - 1)); |
| + max_value = (1 << (const_size - 1)) - 1; |
| + } |
| + |
| + if (c == 'I') |
| + { |
| + value = -value; |
| + } |
| + |
| + if (value >= min_value && value <= max_value) |
| + { |
| + return 1; |
| + } |
| + break; |
| + } |
| + case 'M': |
| + return avr32_mask_upper_bits_operand (GEN_INT (value), VOIDmode); |
| + case 'J': |
| + return avr32_hi16_immediate_operand (GEN_INT (value), VOIDmode); |
| + case 'O': |
| + return one_bit_set_operand (GEN_INT (value), VOIDmode); |
| + case 'N': |
| + return one_bit_cleared_operand (GEN_INT (value), VOIDmode); |
| + case 'L': |
| + /* The lower 16-bits are set. */ |
| + return ((value & 0xffff) == 0xffff) ; |
| + } |
| + |
| + return 0; |
| +} |
| + |
| + |
| +/* Compute mask of which floating-point registers need saving upon |
| + entry to this function. */ |
| +static unsigned long |
| +avr32_compute_save_fp_reg_mask (void) |
| +{ |
| + unsigned long func_type = avr32_current_func_type (); |
| + unsigned int save_reg_mask = 0; |
| + unsigned int reg; |
| + unsigned int max_reg = 7; |
| + int save_all_call_used_regs = FALSE; |
| + |
| + /* This only applies for hardware floating-point implementation. */ |
| + if (!TARGET_HARD_FLOAT) |
| + return 0; |
| + |
| + if (IS_INTERRUPT (func_type)) |
| + { |
| + |
| + /* Interrupt functions must not corrupt any registers, even call |
| + clobbered ones. If this is a leaf function we can just examine the |
| + registers used by the RTL, but otherwise we have to assume that |
| + whatever function is called might clobber anything, and so we have |
| + to save all the call-clobbered registers as well. */ |
| + max_reg = 13; |
| + save_all_call_used_regs = !current_function_is_leaf; |
| + } |
| + |
| + /* All registers used must be saved. */ |
| + for (reg = 0; reg <= max_reg; reg++) |
| + if (regs_ever_live[INTERNAL_FP_REGNUM (reg)] |
| + || (save_all_call_used_regs |
| + && call_used_regs[INTERNAL_FP_REGNUM (reg)])) |
| + save_reg_mask |= (1 << reg); |
| + |
| + return save_reg_mask; |
| +} |
| + |
| +/* Compute mask of registers which need saving upon function entry. */ |
| +static unsigned long |
| +avr32_compute_save_reg_mask (int push) |
| +{ |
| + unsigned long func_type; |
| + unsigned int save_reg_mask = 0; |
| + unsigned int reg; |
| + |
| + func_type = avr32_current_func_type (); |
| + |
| + if (IS_INTERRUPT (func_type)) |
| + { |
| + unsigned int max_reg = 12; |
| + |
| + |
| + /* Get the banking scheme for the interrupt */ |
| + switch (func_type) |
| + { |
| + case AVR32_FT_ISR_FULL: |
| + max_reg = 0; |
| + break; |
| + case AVR32_FT_ISR_HALF: |
| + max_reg = 7; |
| + break; |
| + case AVR32_FT_ISR_NONE: |
| + max_reg = 12; |
| + break; |
| + } |
| + |
| + /* Interrupt functions must not corrupt any registers, even call |
| + clobbered ones. If this is a leaf function we can just examine the |
| + registers used by the RTL, but otherwise we have to assume that |
| + whatever function is called might clobber anything, and so we have |
| + to save all the call-clobbered registers as well. */ |
| + |
| + /* Need not push the registers r8-r12 for AVR32A architectures, as this |
| + is automatically done in hardware. We also do not have any shadow |
| + registers. */ |
| + if (TARGET_UARCH_AVR32A) |
| + { |
| + max_reg = 7; |
| + func_type = AVR32_FT_ISR_NONE; |
| + } |
| + |
| + /* All registers which are used and are not shadowed must be saved. */ |
| + for (reg = 0; reg <= max_reg; reg++) |
| + if (regs_ever_live[INTERNAL_REGNUM (reg)] |
| + || (!current_function_is_leaf |
| + && call_used_regs[INTERNAL_REGNUM (reg)])) |
| + save_reg_mask |= (1 << reg); |
| + |
| + /* Check LR */ |
| + if ((regs_ever_live[LR_REGNUM] |
| + || !current_function_is_leaf || frame_pointer_needed) |
| + /* Only non-shadowed register models */ |
| + && (func_type == AVR32_FT_ISR_NONE)) |
| + save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM)); |
| + |
| + /* Make sure that the GOT register is pushed. */ |
| + if (max_reg >= ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM) |
| + && current_function_uses_pic_offset_table) |
| + save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM)); |
| + |
| + } |
| + else |
| + { |
| + int use_pushm = optimize_size; |
| + |
| + /* In the normal case we only need to save those registers which are |
| + call saved and which are used by this function. */ |
| + for (reg = 0; reg <= 7; reg++) |
| + if (regs_ever_live[INTERNAL_REGNUM (reg)] |
| + && !call_used_regs[INTERNAL_REGNUM (reg)]) |
| + save_reg_mask |= (1 << reg); |
| + |
| + /* Make sure that the GOT register is pushed. */ |
| + if (current_function_uses_pic_offset_table) |
| + save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM)); |
| + |
| + |
| + /* If we are optimizing for size and do not have anonymous arguments, |
| + always use pushm/popm. */ |
| + if (use_pushm) |
| + { |
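| + /* pushm/popm can only handle whole register groups (r0-r3, r4-r7, |
| + r8-r9), so round any partially used group up to the full group. */ |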
| + if ((save_reg_mask & (1 << 0)) |
| + || (save_reg_mask & (1 << 1)) |
| + || (save_reg_mask & (1 << 2)) || (save_reg_mask & (1 << 3))) |
| + save_reg_mask |= 0xf; |
| + |
| + if ((save_reg_mask & (1 << 4)) |
| + || (save_reg_mask & (1 << 5)) |
| + || (save_reg_mask & (1 << 6)) || (save_reg_mask & (1 << 7))) |
| + save_reg_mask |= 0xf0; |
| + |
| + if ((save_reg_mask & (1 << 8)) || (save_reg_mask & (1 << 9))) |
| + save_reg_mask |= 0x300; |
| + } |
| + |
| + |
| + /* Check LR */ |
| + if ((regs_ever_live[LR_REGNUM] |
| + || !current_function_is_leaf |
| + || (optimize_size |
| + && save_reg_mask |
| + && !current_function_calls_eh_return) || frame_pointer_needed)) |
| + { |
| + if (push |
| + /* Never pop LR into PC for functions which |
| + call __builtin_eh_return, since we need to |
| + fix the SP after restoring the registers |
| + and before returning. */ |
| + || current_function_calls_eh_return) |
| + { |
| + /* Push/Pop LR */ |
| + save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM)); |
| + } |
| + else |
| + { |
| + /* Pop PC */ |
| + save_reg_mask |= (1 << ASM_REGNUM (PC_REGNUM)); |
| + } |
| + } |
| + } |
| + |
| + |
| + /* Save registers so the exception handler can modify them. */ |
| + if (current_function_calls_eh_return) |
| + { |
| + unsigned int i; |
| + |
| + for (i = 0;; i++) |
| + { |
| + reg = EH_RETURN_DATA_REGNO (i); |
| + if (reg == INVALID_REGNUM) |
| + break; |
| + save_reg_mask |= 1 << ASM_REGNUM (reg); |
| + } |
| + } |
| + |
| + return save_reg_mask; |
| +} |
| + |
| +/* Compute total size in bytes of all saved registers. */ |
| +static int |
| +avr32_get_reg_mask_size (int reg_mask) |
| +{ |
| + int reg, size; |
| + size = 0; |
| + |
| + for (reg = 0; reg <= 15; reg++) |
| + if (reg_mask & (1 << reg)) |
| + size += 4; |
| + |
| + return size; |
| +} |
| + |
| +/* Get a register from one of the registers which are saved onto the stack |
| + upon function entry. */ |
| + |
| +static int |
| +avr32_get_saved_reg (int save_reg_mask) |
| +{ |
| + unsigned int reg; |
| + |
| + /* Find the first register which is saved in the saved_reg_mask */ |
| + for (reg = 0; reg <= 15; reg++) |
| + if (save_reg_mask & (1 << reg)) |
| + return reg; |
| + |
| + return -1; |
| +} |
| + |
| +/* Return 1 if it is possible to return using a single instruction. */ |
| +int |
| +avr32_use_return_insn (int iscond) |
| +{ |
| + unsigned int func_type = avr32_current_func_type (); |
| + unsigned long saved_int_regs; |
| + unsigned long saved_fp_regs; |
| + |
| + /* Never use a return instruction before reload has run. */ |
| + if (!reload_completed) |
| + return 0; |
| + |
| + /* Must adjust the stack for vararg functions. */ |
| + if (current_function_args_info.uses_anonymous_args) |
| + return 0; |
| + |
| + /* If there is a stack adjustment. */ |
| + if (get_frame_size ()) |
| + return 0; |
| + |
| + saved_int_regs = avr32_compute_save_reg_mask (TRUE); |
| + saved_fp_regs = avr32_compute_save_fp_reg_mask (); |
| + |
| + /* Functions which have saved fp-regs on the stack cannot return in |
| + one instruction. */ |
| + if (saved_fp_regs) |
| + return 0; |
| + |
| + /* Conditional returns cannot be performed in one instruction if we need |
| + to restore registers from the stack. */ |
| + if (iscond && saved_int_regs) |
| + return 0; |
| + |
| + /* Conditional returns cannot be used for interrupt handlers. */ |
| + if (iscond && IS_INTERRUPT (func_type)) |
| + return 0; |
| + |
| + /* Interrupt handlers which need to pop registers cannot use a single return instruction. */ |
| + if (saved_int_regs && IS_INTERRUPT (func_type)) |
| + return 0; |
| + |
| + |
| + /* If there are saved registers but the LR isn't saved, then we need two |
| + instructions for the return. */ |
| + if (saved_int_regs && !(saved_int_regs & (1 << ASM_REGNUM (LR_REGNUM)))) |
| + return 0; |
| + |
| + |
| + return 1; |
| +} |
| + |
| + |
| +/* Generate some function prologue info in the assembly file. */ |
| + |
| +void |
| +avr32_target_asm_function_prologue (FILE * f, HOST_WIDE_INT frame_size) |
| +{ |
| + if (IS_NAKED (avr32_current_func_type ())) |
| + fprintf (f, |
| + "\t# Function is naked: Prologue and epilogue provided by programmer\n"); |
| + |
| + if (IS_INTERRUPT (avr32_current_func_type ())) |
| + { |
| + switch (avr32_current_func_type ()) |
| + { |
| + case AVR32_FT_ISR_FULL: |
| + fprintf (f, |
| + "\t# Interrupt Function: Fully shadowed register file\n"); |
| + break; |
| + case AVR32_FT_ISR_HALF: |
| + fprintf (f, |
| + "\t# Interrupt Function: Half shadowed register file\n"); |
| + break; |
| + default: |
| + case AVR32_FT_ISR_NONE: |
| + fprintf (f, "\t# Interrupt Function: No shadowed register file\n"); |
| + break; |
| + } |
| + } |
| + |
| + |
| + fprintf (f, "\t# args = %i, frame = %li, pretend = %i\n", |
| + current_function_args_size, frame_size, |
| + current_function_pretend_args_size); |
| + |
| + fprintf (f, "\t# frame_needed = %i, leaf_function = %i\n", |
| + frame_pointer_needed, current_function_is_leaf); |
| + |
| + fprintf (f, "\t# uses_anonymous_args = %i\n", |
| + current_function_args_info.uses_anonymous_args); |
| + if (current_function_calls_eh_return) |
| + fprintf (f, "\t# Calls __builtin_eh_return.\n"); |
| + |
| +} |
| + |
| + |
| +/* Generate and emit an insn that we will recognize as a pushm or stm. |
| + Unfortunately, since this insn does not reflect very well the actual |
| + semantics of the operation, we need to annotate the insn for the benefit |
| + of DWARF2 frame unwind information. */ |
| + |
| +int avr32_convert_to_reglist16 (int reglist8_vect); |
| + |
| +static rtx |
| +emit_multi_reg_push (int reglist, int usePUSHM) |
| +{ |
| + rtx insn; |
| + rtx dwarf; |
| + rtx tmp; |
| + rtx reg; |
| + int i; |
| + int nr_regs; |
| + int index = 0; |
| + |
| + if (usePUSHM) |
| + { |
| + insn = emit_insn (gen_pushm (gen_rtx_CONST_INT (SImode, reglist))); |
| + reglist = avr32_convert_to_reglist16 (reglist); |
| + } |
| + else |
| + { |
| + insn = emit_insn (gen_stm (stack_pointer_rtx, |
| + gen_rtx_CONST_INT (SImode, reglist), |
| + gen_rtx_CONST_INT (SImode, 1))); |
| + } |
| + |
| + nr_regs = avr32_get_reg_mask_size (reglist) / 4; |
| + dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1)); |
| + |
| + for (i = 15; i >= 0; i--) |
| + { |
| + if (reglist & (1 << i)) |
| + { |
| + reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (i)); |
| + tmp = gen_rtx_SET (VOIDmode, |
| + gen_rtx_MEM (SImode, |
| + plus_constant (stack_pointer_rtx, |
| + 4 * index)), reg); |
| + RTX_FRAME_RELATED_P (tmp) = 1; |
| + XVECEXP (dwarf, 0, 1 + index++) = tmp; |
| + } |
| + } |
| + |
| + tmp = gen_rtx_SET (SImode, |
| + stack_pointer_rtx, |
| + gen_rtx_PLUS (SImode, |
| + stack_pointer_rtx, |
| + GEN_INT (-4 * nr_regs))); |
| + RTX_FRAME_RELATED_P (tmp) = 1; |
| + XVECEXP (dwarf, 0, 0) = tmp; |
| + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf, |
| + REG_NOTES (insn)); |
| + return insn; |
| +} |
| + |
| + |
| +static rtx |
| +emit_multi_fp_reg_push (int reglist) |
| +{ |
| + rtx insn; |
| + rtx dwarf; |
| + rtx tmp; |
| + rtx reg; |
| + int i; |
| + int nr_regs; |
| + int index = 0; |
| + |
| + insn = emit_insn (gen_stm_fp (stack_pointer_rtx, |
| + gen_rtx_CONST_INT (SImode, reglist), |
| + gen_rtx_CONST_INT (SImode, 1))); |
| + |
| + nr_regs = avr32_get_reg_mask_size (reglist) / 4; |
| + dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1)); |
| + |
| + for (i = 15; i >= 0; i--) |
| + { |
| + if (reglist & (1 << i)) |
| + { |
| + reg = gen_rtx_REG (SImode, INTERNAL_FP_REGNUM (i)); |
| + tmp = gen_rtx_SET (VOIDmode, |
| + gen_rtx_MEM (SImode, |
| + plus_constant (stack_pointer_rtx, |
| + 4 * index)), reg); |
| + RTX_FRAME_RELATED_P (tmp) = 1; |
| + XVECEXP (dwarf, 0, 1 + index++) = tmp; |
| + } |
| + } |
| + |
| + tmp = gen_rtx_SET (SImode, |
| + stack_pointer_rtx, |
| + gen_rtx_PLUS (SImode, |
| + stack_pointer_rtx, |
| + GEN_INT (-4 * nr_regs))); |
| + RTX_FRAME_RELATED_P (tmp) = 1; |
| + XVECEXP (dwarf, 0, 0) = tmp; |
| + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf, |
| + REG_NOTES (insn)); |
| + return insn; |
| +} |
| + |
| +rtx |
| +avr32_gen_load_multiple (rtx * regs, int count, rtx from, |
| + int write_back, int in_struct_p, int scalar_p) |
| +{ |
| + |
| + rtx result; |
| + int i = 0, j; |
| + |
| + result = |
| + gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + (write_back ? 1 : 0))); |
| + |
| + if (write_back) |
| + { |
| + XVECEXP (result, 0, 0) |
| + = gen_rtx_SET (GET_MODE (from), from, |
| + plus_constant (from, count * 4)); |
| + i = 1; |
| + count++; |
| + } |
| + |
| + |
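| + /* Each remaining element loads one register from the next word in |
| + memory; the memory reference is wrapped in an UNSPEC_LDM. */ |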
| + for (j = 0; i < count; i++, j++) |
| + { |
| + rtx unspec; |
| + rtx mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4)); |
| + MEM_IN_STRUCT_P (mem) = in_struct_p; |
| + MEM_SCALAR_P (mem) = scalar_p; |
| + unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, mem), UNSPEC_LDM); |
| + XVECEXP (result, 0, i) = gen_rtx_SET (VOIDmode, regs[j], unspec); |
| + } |
| + |
| + return result; |
| +} |
| + |
| + |
| +rtx |
| +avr32_gen_store_multiple (rtx * regs, int count, rtx to, |
| + int in_struct_p, int scalar_p) |
| +{ |
| + rtx result; |
| + int i = 0, j; |
| + |
| + result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count)); |
| + |
| + for (j = 0; i < count; i++, j++) |
| + { |
| + rtx mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4)); |
| + MEM_IN_STRUCT_P (mem) = in_struct_p; |
| + MEM_SCALAR_P (mem) = scalar_p; |
| + XVECEXP (result, 0, i) |
| + = gen_rtx_SET (VOIDmode, mem, |
| + gen_rtx_UNSPEC (VOIDmode, |
| + gen_rtvec (1, regs[j]), |
| + UNSPEC_STORE_MULTIPLE)); |
| + } |
| + |
| + return result; |
| +} |
| + |
| + |
| +/* Move a block of memory if it is word aligned or we support unaligned |
| + word memory accesses. The size must be at most 64 bytes. */ |
| + |
| +int |
| +avr32_gen_movmemsi (rtx * operands) |
| +{ |
| + HOST_WIDE_INT bytes_to_go; |
| + rtx src, dst; |
| + rtx st_src, st_dst; |
| + int src_offset = 0, dst_offset = 0; |
| + int block_size; |
| + int dst_in_struct_p, src_in_struct_p; |
| + int dst_scalar_p, src_scalar_p; |
| + int unaligned; |
| + |
| + if (GET_CODE (operands[2]) != CONST_INT |
| + || GET_CODE (operands[3]) != CONST_INT |
| + || INTVAL (operands[2]) > 64 |
| + || ((INTVAL (operands[3]) & 3) && !TARGET_UNALIGNED_WORD)) |
| + return 0; |
| + |
| + unaligned = (INTVAL (operands[3]) & 3) != 0; |
| + |
| + block_size = 4; |
| + |
| + st_dst = XEXP (operands[0], 0); |
| + st_src = XEXP (operands[1], 0); |
| + |
| + dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]); |
| + dst_scalar_p = MEM_SCALAR_P (operands[0]); |
| + src_in_struct_p = MEM_IN_STRUCT_P (operands[1]); |
| + src_scalar_p = MEM_SCALAR_P (operands[1]); |
| + |
| + dst = copy_to_mode_reg (SImode, st_dst); |
| + src = copy_to_mode_reg (SImode, st_src); |
| + |
| + bytes_to_go = INTVAL (operands[2]); |
| + |
| + while (bytes_to_go) |
| + { |
| + enum machine_mode move_mode; |
| + /* (Seems to be a problem with reloads for the movti pattern so this is |
| + disabled until that problem is resolved) |
| + UPDATE: Problem seems to be solved now.... */ |
| + if (bytes_to_go >= GET_MODE_SIZE (TImode) && !unaligned |
| + /* Do not emit ldm/stm for UC3 as ld.d/st.d is more optimal. */ |
| + && !TARGET_ARCH_UC) |
| + move_mode = TImode; |
| + else if ((bytes_to_go >= GET_MODE_SIZE (DImode)) && !unaligned) |
| + move_mode = DImode; |
| + else if (bytes_to_go >= GET_MODE_SIZE (SImode)) |
| + move_mode = SImode; |
| + else |
| + move_mode = QImode; |
| + |
| + { |
| + rtx src_mem; |
| + rtx dst_mem = gen_rtx_MEM (move_mode, |
| + gen_rtx_PLUS (SImode, dst, |
| + GEN_INT (dst_offset))); |
| + dst_offset += GET_MODE_SIZE (move_mode); |
| + if ( 0 /* This causes an error in GCC. Think there is |
| + something wrong in the gcse pass which causes REG_EQUIV notes |
| + to be wrong so disabling it for now. */ |
| + && move_mode == TImode |
| + && INTVAL (operands[2]) > GET_MODE_SIZE (TImode) ) |
| + { |
| + src_mem = gen_rtx_MEM (move_mode, |
| + gen_rtx_POST_INC (SImode, src)); |
| + } |
| + else |
| + { |
| + src_mem = gen_rtx_MEM (move_mode, |
| + gen_rtx_PLUS (SImode, src, |
| + GEN_INT (src_offset))); |
| + src_offset += GET_MODE_SIZE (move_mode); |
| + } |
| + |
| + bytes_to_go -= GET_MODE_SIZE (move_mode); |
| + |
| + MEM_IN_STRUCT_P (dst_mem) = dst_in_struct_p; |
| + MEM_SCALAR_P (dst_mem) = dst_scalar_p; |
| + |
| + MEM_IN_STRUCT_P (src_mem) = src_in_struct_p; |
| + MEM_SCALAR_P (src_mem) = src_scalar_p; |
| + emit_move_insn (dst_mem, src_mem); |
| + |
| + } |
| + } |
| + |
| + return 1; |
| +} |
| + |
| + |
| + |
| +/* Expand the function prologue. */ |
| +void |
| +avr32_expand_prologue (void) |
| +{ |
| + rtx insn, dwarf; |
| + unsigned long saved_reg_mask, saved_fp_reg_mask; |
| + int reglist8 = 0; |
| + |
| + /* Naked functions do not have a prologue. */ |
| + if (IS_NAKED (avr32_current_func_type ())) |
| + return; |
| + |
| + saved_reg_mask = avr32_compute_save_reg_mask (TRUE); |
| + |
| + if (saved_reg_mask) |
| + { |
| + /* Must push used registers */ |
| + |
| + /* Should we use PUSHM or STM? */ |
| + int usePUSHM = TRUE; |
| + reglist8 = 0; |
| + if (((saved_reg_mask & (1 << 0)) || |
| + (saved_reg_mask & (1 << 1)) || |
| + (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3)))) |
| + { |
| + /* One of R0-R3 should at least be pushed */ |
| + if (((saved_reg_mask & (1 << 0)) && |
| + (saved_reg_mask & (1 << 1)) && |
| + (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3)))) |
| + { |
| + /* All should be pushed */ |
| + reglist8 |= 0x01; |
| + } |
| + else |
| + { |
| + usePUSHM = FALSE; |
| + } |
| + } |
| + |
| + if (((saved_reg_mask & (1 << 4)) || |
| + (saved_reg_mask & (1 << 5)) || |
| + (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7)))) |
| + { |
| + /* One of R4-R7 should at least be pushed */ |
| + if (((saved_reg_mask & (1 << 4)) && |
| + (saved_reg_mask & (1 << 5)) && |
| + (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7)))) |
| + { |
| + if (usePUSHM) |
| + /* All should be pushed */ |
| + reglist8 |= 0x02; |
| + } |
| + else |
| + { |
| + usePUSHM = FALSE; |
| + } |
| + } |
| + |
| + if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9)))) |
| + { |
| + /* One of R8-R9 should at least be pushed */ |
| + if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9)))) |
| + { |
| + if (usePUSHM) |
| + /* All should be pushed */ |
| + reglist8 |= 0x04; |
| + } |
| + else |
| + { |
| + usePUSHM = FALSE; |
| + } |
| + } |
| + |
| + if (saved_reg_mask & (1 << 10)) |
| + reglist8 |= 0x08; |
| + |
| + if (saved_reg_mask & (1 << 11)) |
| + reglist8 |= 0x10; |
| + |
| + if (saved_reg_mask & (1 << 12)) |
| + reglist8 |= 0x20; |
| + |
| + if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM))) |
| + { |
| + /* Push LR */ |
| + reglist8 |= 0x40; |
| + } |
| + |
| + if (usePUSHM) |
| + { |
| + insn = emit_multi_reg_push (reglist8, TRUE); |
| + } |
| + else |
| + { |
| + insn = emit_multi_reg_push (saved_reg_mask, FALSE); |
| + } |
| + RTX_FRAME_RELATED_P (insn) = 1; |
| + |
| + /* Prevent this instruction from being scheduled after any other |
| + instructions. */ |
| + emit_insn (gen_blockage ()); |
| + } |
| + |
| + saved_fp_reg_mask = avr32_compute_save_fp_reg_mask (); |
| + if (saved_fp_reg_mask) |
| + { |
| + insn = emit_multi_fp_reg_push (saved_fp_reg_mask); |
| + RTX_FRAME_RELATED_P (insn) = 1; |
| + |
| + /* Prevent this instruction from being scheduled after any other |
| + instructions. */ |
| + emit_insn (gen_blockage ()); |
| + } |
| + |
| + /* Set frame pointer */ |
| + if (frame_pointer_needed) |
| + { |
| + insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx); |
| + RTX_FRAME_RELATED_P (insn) = 1; |
| + } |
| + |
| + if (get_frame_size () > 0) |
| + { |
| + if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks21")) |
| + { |
| + insn = emit_insn (gen_rtx_SET (SImode, |
| + stack_pointer_rtx, |
| + gen_rtx_PLUS (SImode, |
| + stack_pointer_rtx, |
| + gen_rtx_CONST_INT |
| + (SImode, |
| + -get_frame_size |
| + ())))); |
| + RTX_FRAME_RELATED_P (insn) = 1; |
| + } |
| + else |
| + { |
| + /* The immediate is larger than Ks21. We must either use one of the |
| + pushed registers as temporary storage or create a temporary |
| + register by pushing a register to the stack. */ |
| + rtx temp_reg, const_pool_entry, insn; |
| + if (saved_reg_mask) |
| + { |
| + temp_reg = |
| + gen_rtx_REG (SImode, |
| + INTERNAL_REGNUM (avr32_get_saved_reg |
| + (saved_reg_mask))); |
| + } |
| + else |
| + { |
| + temp_reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (7)); |
| + emit_move_insn (gen_rtx_MEM |
| + (SImode, |
| + gen_rtx_PRE_DEC (SImode, stack_pointer_rtx)), |
| + temp_reg); |
| + } |
| + |
| + const_pool_entry = |
| + force_const_mem (SImode, |
| + gen_rtx_CONST_INT (SImode, get_frame_size ())); |
| + emit_move_insn (temp_reg, const_pool_entry); |
| + |
| + insn = emit_insn (gen_rtx_SET (SImode, |
| + stack_pointer_rtx, |
| + gen_rtx_MINUS (SImode, |
| + stack_pointer_rtx, |
| + temp_reg))); |
| + |
| + dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx, |
| + gen_rtx_PLUS (SImode, stack_pointer_rtx, |
| + GEN_INT (-get_frame_size ()))); |
| + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, |
| + dwarf, REG_NOTES (insn)); |
| + RTX_FRAME_RELATED_P (insn) = 1; |
| + |
| + if (!saved_reg_mask) |
| + { |
| + insn = |
| + emit_move_insn (temp_reg, |
| + gen_rtx_MEM (SImode, |
| + gen_rtx_POST_INC (SImode, |
| + gen_rtx_REG |
| + (SImode, |
| + 13)))); |
| + } |
| + |
| + /* Mark the temp register as dead */ |
| + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, temp_reg, |
| + REG_NOTES (insn)); |
| + |
| + |
| + } |
| + |
| + /* Prevent the stack adjustment from being scheduled after any |
| + instructions using the frame pointer. */ |
| + emit_insn (gen_blockage ()); |
| + } |
| + |
| + /* Load GOT */ |
| + if (flag_pic) |
| + { |
| + avr32_load_pic_register (); |
| + |
| + /* gcc does not know that load or call instructions might use the pic |
| + register, so it might schedule these instructions before the loading |
| + of the pic register. To avoid this, emit a barrier for now. TODO! |
| + Find out a better way to let gcc know which instructions might use |
| + the pic register. */ |
| + emit_insn (gen_blockage ()); |
| + } |
| + return; |
| +} |
| + |
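| +/* Set the return address for the current function either directly in LR, |
| + or in the stack slot where the prologue saved LR. */ |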
| +void |
| +avr32_set_return_address (rtx source, rtx scratch) |
| +{ |
| + rtx addr; |
| + unsigned long saved_regs; |
| + |
| + saved_regs = avr32_compute_save_reg_mask (TRUE); |
| + |
| + if (!(saved_regs & (1 << ASM_REGNUM (LR_REGNUM)))) |
| + emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source); |
| + else |
| + { |
| + if (frame_pointer_needed) |
| + addr = gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM); |
| + else |
| + if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks16")) |
| + { |
| + addr = plus_constant (stack_pointer_rtx, get_frame_size ()); |
| + } |
| + else |
| + { |
| + emit_insn (gen_movsi (scratch, GEN_INT (get_frame_size ()))); |
| + addr = scratch; |
| + } |
| + emit_move_insn (gen_rtx_MEM (Pmode, addr), source); |
| + } |
| +} |
| + |
| + |
| + |
| +/* Return the length of INSN. LENGTH is the initial length computed by |
| + attributes in the machine-description file. */ |
| + |
| +int |
| +avr32_adjust_insn_length (rtx insn ATTRIBUTE_UNUSED, |
| + int length ATTRIBUTE_UNUSED) |
| +{ |
| + return length; |
| +} |
| + |
| +void |
| +avr32_output_return_instruction (int single_ret_inst ATTRIBUTE_UNUSED, |
| + int iscond ATTRIBUTE_UNUSED, |
| + rtx cond ATTRIBUTE_UNUSED, rtx r12_imm) |
| +{ |
| + |
| + unsigned long saved_reg_mask, saved_fp_reg_mask; |
| + int insert_ret = TRUE; |
| + int reglist8 = 0; |
| + int stack_adjustment = get_frame_size (); |
| + unsigned int func_type = avr32_current_func_type (); |
| + FILE *f = asm_out_file; |
| + |
| + /* Naked functions do not have an epilogue. */ |
| + if (IS_NAKED (func_type)) |
| + return; |
| + |
| + saved_fp_reg_mask = avr32_compute_save_fp_reg_mask (); |
| + |
| + saved_reg_mask = avr32_compute_save_reg_mask (FALSE); |
| + |
| + /* Reset frame pointer */ |
| + if (stack_adjustment > 0) |
| + { |
| + if (avr32_const_ok_for_constraint_p (stack_adjustment, 'I', "Is21")) |
| + { |
| + fprintf (f, "\tsub\tsp, %i # Reset Frame Pointer\n", |
| + -stack_adjustment); |
| + } |
| + else |
| + { |
| + /* TODO! Is it safe to use r8 as scratch?? */ |
| + fprintf (f, "\tmov\tr8, lo(%i) # Reset Frame Pointer\n", |
| + -stack_adjustment); |
| + fprintf (f, "\torh\tr8, hi(%i) # Reset Frame Pointer\n", |
| + -stack_adjustment); |
| + fprintf (f, "\tadd\tsp, r8 # Reset Frame Pointer\n"); |
| + } |
| + } |
| + |
| + if (saved_fp_reg_mask) |
| + { |
| + char reglist[64]; /* 64 bytes should be enough... */ |
| + avr32_make_fp_reglist_w (saved_fp_reg_mask, (char *) reglist); |
| + fprintf (f, "\tldcm.w\tcp0, sp++, %s\n", reglist); |
| + if (saved_fp_reg_mask & ~0xff) |
| + { |
| + saved_fp_reg_mask &= ~0xff; |
| + avr32_make_fp_reglist_d (saved_fp_reg_mask, (char *) reglist); |
| + fprintf (f, "\tldcm.d\tcp0, sp++, %s\n", reglist); |
| + } |
| + } |
| + |
| + if (saved_reg_mask) |
| + { |
| + /* Must pop used registers */ |
| + |
| + /* Should we use POPM or LDM? */ |
| + int usePOPM = TRUE; |
| + if (((saved_reg_mask & (1 << 0)) || |
| + (saved_reg_mask & (1 << 1)) || |
| + (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3)))) |
| + { |
| + /* One of R0-R3 should at least be popped */ |
| + if (((saved_reg_mask & (1 << 0)) && |
| + (saved_reg_mask & (1 << 1)) && |
| + (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3)))) |
| + { |
| + /* All should be popped */ |
| + reglist8 |= 0x01; |
| + } |
| + else |
| + { |
| + usePOPM = FALSE; |
| + } |
| + } |
| + |
| + if (((saved_reg_mask & (1 << 4)) || |
| + (saved_reg_mask & (1 << 5)) || |
| + (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7)))) |
| + { |
| + /* One of R4-R7 should at least be popped */ |
| + if (((saved_reg_mask & (1 << 4)) && |
| + (saved_reg_mask & (1 << 5)) && |
| + (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7)))) |
| + { |
| + if (usePOPM) |
| + /* All should be popped */ |
| + reglist8 |= 0x02; |
| + } |
| + else |
| + { |
| + usePOPM = FALSE; |
| + } |
| + } |
| + |
| + if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9)))) |
| + { |
| + /* One of R8-R9 should at least be popped */ |
| + if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9)))) |
| + { |
| + if (usePOPM) |
| + /* All should be popped */ |
| + reglist8 |= 0x04; |
| + } |
| + else |
| + { |
| + usePOPM = FALSE; |
| + } |
| + } |
| + |
| + if (saved_reg_mask & (1 << 10)) |
| + reglist8 |= 0x08; |
| + |
| + if (saved_reg_mask & (1 << 11)) |
| + reglist8 |= 0x10; |
| + |
| + if (saved_reg_mask & (1 << 12)) |
| + reglist8 |= 0x20; |
| + |
| + if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM))) |
| + /* Pop LR */ |
| + reglist8 |= 0x40; |
| + |
| + if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM))) |
| + /* Pop LR into PC. */ |
| + reglist8 |= 0x80; |
| + |
| + if (usePOPM) |
| + { |
| + char reglist[64]; /* 64 bytes should be enough... */ |
| + avr32_make_reglist8 (reglist8, (char *) reglist); |
| + |
| + if (reglist8 & 0x80) |
| + /* This instruction is also a return */ |
| + insert_ret = FALSE; |
| + |
| + if (r12_imm && !insert_ret) |
| + fprintf (f, "\tpopm\t%s, r12=%li\n", reglist, INTVAL (r12_imm)); |
| + else |
| + fprintf (f, "\tpopm\t%s\n", reglist); |
| + |
| + } |
| + else |
| + { |
| + char reglist[64]; /* 64 bytes should be enough... */ |
| + avr32_make_reglist16 (saved_reg_mask, (char *) reglist); |
| + if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM))) |
| + /* This instruction is also a return */ |
| + insert_ret = FALSE; |
| + |
| + if (r12_imm && !insert_ret) |
| + fprintf (f, "\tldm\tsp++, %s, r12=%li\n", reglist, |
| + INTVAL (r12_imm)); |
| + else |
| + fprintf (f, "\tldm\tsp++, %s\n", reglist); |
| + |
| + } |
| + |
| + } |
| + |
| + /* Stack adjustment for exception handler. */ |
| + if (current_function_calls_eh_return) |
| + fprintf (f, "\tadd\tsp, r%d\n", ASM_REGNUM (EH_RETURN_STACKADJ_REGNO)); |
| + |
| + |
| + if (IS_INTERRUPT (func_type)) |
| + { |
| + fprintf (f, "\trete\n"); |
| + } |
| + else if (insert_ret) |
| + { |
| + if (r12_imm) |
| + fprintf (f, "\tretal\t%li\n", INTVAL (r12_imm)); |
| + else |
| + fprintf (f, "\tretal\tr12\n"); |
| + } |
| +} |
| + |
| +/* Function for converting a fp-register mask to a |
| + reglistCPD8 register list string. */ |
| +void |
| +avr32_make_fp_reglist_d (int reglist_mask, char *reglist_string) |
| +{ |
| + int i; |
| + |
| + /* Make sure reglist_string is empty */ |
| + reglist_string[0] = '\0'; |
| + |
| + for (i = 0; i < NUM_FP_REGS; i += 2) |
| + { |
| + if (reglist_mask & (1 << i)) |
| + { |
| + strlen (reglist_string) ? |
| + sprintf (reglist_string, "%s, %s-%s", reglist_string, |
| + reg_names[INTERNAL_FP_REGNUM (i)], |
| + reg_names[INTERNAL_FP_REGNUM (i + 1)]) : |
| + sprintf (reglist_string, "%s-%s", |
| + reg_names[INTERNAL_FP_REGNUM (i)], |
| + reg_names[INTERNAL_FP_REGNUM (i + 1)]); |
| + } |
| + } |
| +} |
| + |
| +/* Function for converting a fp-register mask to a |
| + reglistCP8 register list string. */ |
| +void |
| +avr32_make_fp_reglist_w (int reglist_mask, char *reglist_string) |
| +{ |
| + int i; |
| + |
| + /* Make sure reglist_string is empty */ |
| + reglist_string[0] = '\0'; |
| + |
| + for (i = 0; i < NUM_FP_REGS; ++i) |
| + { |
| + if (reglist_mask & (1 << i)) |
| + { |
| + strlen (reglist_string) ? |
| + sprintf (reglist_string, "%s, %s", reglist_string, |
| + reg_names[INTERNAL_FP_REGNUM (i)]) : |
| + sprintf (reglist_string, "%s", reg_names[INTERNAL_FP_REGNUM (i)]); |
| + } |
| + } |
| +} |
| + |
| +void |
| +avr32_make_reglist16 (int reglist16_vect, char *reglist16_string) |
| +{ |
| + int i; |
| + |
| + /* Make sure reglist16_string is empty */ |
| + reglist16_string[0] = '\0'; |
| + |
| + for (i = 0; i < 16; ++i) |
| + { |
| + if (reglist16_vect & (1 << i)) |
| + { |
| + strlen (reglist16_string) ? |
| + sprintf (reglist16_string, "%s, %s", reglist16_string, |
| + reg_names[INTERNAL_REGNUM (i)]) : |
| + sprintf (reglist16_string, "%s", reg_names[INTERNAL_REGNUM (i)]); |
| + } |
| + } |
| +} |
| + |
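| +/* Expand an 8-bit pushm/popm register list into the corresponding |
| + 16-bit register mask. */ |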
| +int |
| +avr32_convert_to_reglist16 (int reglist8_vect) |
| +{ |
| + int reglist16_vect = 0; |
| + if (reglist8_vect & 0x1) |
| + reglist16_vect |= 0xF; |
| + if (reglist8_vect & 0x2) |
| + reglist16_vect |= 0xF0; |
| + if (reglist8_vect & 0x4) |
| + reglist16_vect |= 0x300; |
| + if (reglist8_vect & 0x8) |
| + reglist16_vect |= 0x400; |
| + if (reglist8_vect & 0x10) |
| + reglist16_vect |= 0x800; |
| + if (reglist8_vect & 0x20) |
| + reglist16_vect |= 0x1000; |
| + if (reglist8_vect & 0x40) |
| + reglist16_vect |= 0x4000; |
| + if (reglist8_vect & 0x80) |
| + reglist16_vect |= 0x8000; |
| + |
| + return reglist16_vect; |
| +} |
| + |
| +void |
| +avr32_make_reglist8 (int reglist8_vect, char *reglist8_string) |
| +{ |
| + /* Make sure reglist8_string is empty */ |
| + reglist8_string[0] = '\0'; |
| + |
| + if (reglist8_vect & 0x1) |
| + sprintf (reglist8_string, "r0-r3"); |
| + if (reglist8_vect & 0x2) |
| + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r4-r7", |
| + reglist8_string) : |
| + sprintf (reglist8_string, "r4-r7"); |
| + if (reglist8_vect & 0x4) |
| + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r8-r9", |
| + reglist8_string) : |
| + sprintf (reglist8_string, "r8-r9"); |
| + if (reglist8_vect & 0x8) |
| + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r10", |
| + reglist8_string) : |
| + sprintf (reglist8_string, "r10"); |
| + if (reglist8_vect & 0x10) |
| + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r11", |
| + reglist8_string) : |
| + sprintf (reglist8_string, "r11"); |
| + if (reglist8_vect & 0x20) |
| + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r12", |
| + reglist8_string) : |
| + sprintf (reglist8_string, "r12"); |
| + if (reglist8_vect & 0x40) |
| + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, lr", |
| + reglist8_string) : |
| + sprintf (reglist8_string, "lr"); |
| + if (reglist8_vect & 0x80) |
| + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, pc", |
| + reglist8_string) : |
| + sprintf (reglist8_string, "pc"); |
| +} |
| + |
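| +/* The exception handler return data registers are r8-r11. */ |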
| +int |
| +avr32_eh_return_data_regno (int n) |
| +{ |
| + if (n >= 0 && n <= 3) |
| + return 8 + n; |
| + else |
| + return INVALID_REGNUM; |
| +} |
| + |
| +/* Compute the distance from register FROM to register TO. |
| + These can be the arg pointer, the frame pointer or |
| + the stack pointer. |
| + Typical stack layout looks like this: |
| + |
| + old stack pointer -> | | |
| + ---- |
| + | | \ |
| + | | saved arguments for |
| + | | vararg functions |
| + arg_pointer -> | | / |
| + -- |
| + | | \ |
| + | | call saved |
| + | | registers |
| + | | / |
| + frame ptr -> -- |
| + | | \ |
| + | | local |
| + | | variables |
| + stack ptr --> | | / |
| + -- |
| + | | \ |
| + | | outgoing |
| + | | arguments |
| + | | / |
| + -- |
| + |
| + For a given function some or all of these stack components |
| + may not be needed, giving rise to the possibility of |
| + eliminating some of the registers. |
| + |
| + The values returned by this function must reflect the behaviour |
| + of avr32_expand_prologue() and avr32_compute_save_reg_mask(). |
| + |
| + The sign of the number returned reflects the direction of stack |
| + growth, so the values are positive for all eliminations except |
| + from the soft frame pointer to the hard frame pointer. */ |
| + |
| + |
| +int |
| +avr32_initial_elimination_offset (int from, int to) |
| +{ |
| + int i; |
| + int call_saved_regs = 0; |
| + unsigned long saved_reg_mask, saved_fp_reg_mask; |
| + unsigned int local_vars = get_frame_size (); |
| + |
| + saved_reg_mask = avr32_compute_save_reg_mask (TRUE); |
| + saved_fp_reg_mask = avr32_compute_save_fp_reg_mask (); |
| + |
| + for (i = 0; i < 16; ++i) |
| + { |
| + if (saved_reg_mask & (1 << i)) |
| + call_saved_regs += 4; |
| + } |
| + |
| + for (i = 0; i < NUM_FP_REGS; ++i) |
| + { |
| + if (saved_fp_reg_mask & (1 << i)) |
| + call_saved_regs += 4; |
| + } |
| + |
| + switch (from) |
| + { |
| + case ARG_POINTER_REGNUM: |
| + switch (to) |
| + { |
| + case STACK_POINTER_REGNUM: |
| + return call_saved_regs + local_vars; |
| + case FRAME_POINTER_REGNUM: |
| + return call_saved_regs; |
| + default: |
| + abort (); |
| + } |
| + case FRAME_POINTER_REGNUM: |
| + switch (to) |
| + { |
| + case STACK_POINTER_REGNUM: |
| + return local_vars; |
| + default: |
| + abort (); |
| + } |
| + default: |
| + abort (); |
| + } |
| +} |
| + |
| + |
| +/* |
| + Returns an rtx used when passing the next argument to a function. |
| + avr32_init_cumulative_args() and avr32_function_arg_advance() set which |
| + register to use. |
| +*/ |
| +rtx |
| +avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode, |
| + tree type, int named) |
| +{ |
| + int index = -1; |
| + |
| + HOST_WIDE_INT arg_size, arg_rsize; |
| + if (type) |
| + { |
| + arg_size = int_size_in_bytes (type); |
| + } |
| + else |
| + { |
| + arg_size = GET_MODE_SIZE (mode); |
| + } |
| + arg_rsize = PUSH_ROUNDING (arg_size); |
| + |
| + /* |
| + The last time this macro is called, it is called with mode == VOIDmode, |
| + and its result is passed to the call or call_value pattern as operands 2 |
| + and 3 respectively. */ |
| + if (mode == VOIDmode) |
| + { |
| + return gen_rtx_CONST_INT (SImode, 22); /* ToDo: fixme. */ |
| + } |
| + |
| + if ((*targetm.calls.must_pass_in_stack) (mode, type) || !named) |
| + { |
| + return NULL_RTX; |
| + } |
| + |
| + if (arg_rsize == 8) |
| + { |
| + /* use r11:r10 or r9:r8. */ |
| + if (!(GET_USED_INDEX (cum, 1) || GET_USED_INDEX (cum, 2))) |
| + index = 1; |
| + else if (!(GET_USED_INDEX (cum, 3) || GET_USED_INDEX (cum, 4))) |
| + index = 3; |
| + else |
| + index = -1; |
| + } |
| + else if (arg_rsize == 4) |
| + { /* Use first available register */ |
| + index = 0; |
| + while (index <= LAST_CUM_REG_INDEX && GET_USED_INDEX (cum, index)) |
| + index++; |
| + if (index > LAST_CUM_REG_INDEX) |
| + index = -1; |
| + } |
| + |
| + SET_REG_INDEX (cum, index); |
| + |
| + if (GET_REG_INDEX (cum) >= 0) |
| + return gen_rtx_REG (mode, |
| + avr32_function_arg_reglist[GET_REG_INDEX (cum)]); |
| + |
| + return NULL_RTX; |
| +} |
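| + |
| +/* A sketch of how the selection above works out, assuming that |
| +   avr32_function_arg_reglist maps cum indexes 0..4 to r12, r11, r10, r9 |
| +   and r8 (the mapping itself is defined elsewhere in the port). For a |
| +   hypothetical call |
| + |
| +     void f (int a, long long b, int c); |
| + |
| +   a takes index 0 (r12), b takes the first free pair, index 1 (r11:r10), |
| +   and c takes the next free single register, index 3 (r9). A further |
| +   word-sized argument would take index 4 (r8); anything beyond that, or |
| +   anything must_pass_in_stack, gets NULL_RTX and goes on the stack. */ |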
| + |
| +/* |
| + Set the register used for passing the first argument to a function. |
| +*/ |
| +void |
| +avr32_init_cumulative_args (CUMULATIVE_ARGS * cum, |
| + tree fntype ATTRIBUTE_UNUSED, |
| + rtx libname ATTRIBUTE_UNUSED, |
| + tree fndecl ATTRIBUTE_UNUSED) |
| + { |
| + /* Set all registers as unused. */ |
| + SET_INDEXES_UNUSED (cum); |
| + |
| + /* Reset uses_anonymous_args */ |
| + cum->uses_anonymous_args = 0; |
| + |
| + /* Reset size of stack pushed arguments */ |
| + cum->stack_pushed_args_size = 0; |
| + } |
| + |
| +/* |
| + Set register used for passing the next argument to a function. Only the |
| + Scratch Registers are used. |
| + |
| + number name |
| + 15 r15 PC |
| + 14 r14 LR |
| + 13 r13 _SP_________ |
| + FIRST_CUM_REG 12 r12 _||_ |
| + 10 r11 || |
| + 11 r10 _||_ Scratch Registers |
| + 8 r9 || |
| + LAST_SCRATCH_REG 9 r8 _\/_________ |
| + 6 r7 /\ |
| + 7 r6 || |
| + 4 r5 || |
| + 5 r4 || |
| + 2 r3 || |
| + 3 r2 || |
| + 0 r1 || |
| + 1 r0 _||_________ |
| + |
| +*/ |
| +void |
| +avr32_function_arg_advance (CUMULATIVE_ARGS * cum, enum machine_mode mode, |
| + tree type, int named ATTRIBUTE_UNUSED) |
| +{ |
| + HOST_WIDE_INT arg_size, arg_rsize; |
| + |
| + if (type) |
| + { |
| + arg_size = int_size_in_bytes (type); |
| + } |
| + else |
| + { |
| + arg_size = GET_MODE_SIZE (mode); |
| + } |
| + arg_rsize = PUSH_ROUNDING (arg_size); |
| + |
| +  /* If the argument has to be passed on the stack, no register is used. */ |
| + if ((*targetm.calls.must_pass_in_stack) (mode, type)) |
| + { |
| + cum->stack_pushed_args_size += PUSH_ROUNDING (int_size_in_bytes (type)); |
| + return; |
| + } |
| + |
| + /* Mark the used registers as "used". */ |
| + if (GET_REG_INDEX (cum) >= 0) |
| + { |
| + SET_USED_INDEX (cum, GET_REG_INDEX (cum)); |
| + if (arg_rsize == 8) |
| + { |
| + SET_USED_INDEX (cum, (GET_REG_INDEX (cum) + 1)); |
| + } |
| + } |
| + else |
| + { |
| + /* Had to use stack */ |
| + cum->stack_pushed_args_size += arg_rsize; |
| + } |
| +} |
| + |
| +/* |
| +  Defines which direction to go to find the next register to use if the |
| +  argument is larger than one register or for arguments shorter than an |
| + int which is not promoted, such as the last part of structures with |
| + size not a multiple of 4. */ |
| +enum direction |
| +avr32_function_arg_padding (enum machine_mode mode ATTRIBUTE_UNUSED, |
| + tree type) |
| +{ |
| + /* Pad upward for all aggregates except byte and halfword sized aggregates |
| + which can be passed in registers. */ |
| + if (type |
| + && AGGREGATE_TYPE_P (type) |
| + && (int_size_in_bytes (type) != 1) |
| + && !((int_size_in_bytes (type) == 2) |
| + && TYPE_ALIGN_UNIT (type) >= 2) |
| + && (int_size_in_bytes (type) & 0x3)) |
| + { |
| + return upward; |
| + } |
| + |
| + return downward; |
| +} |
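| + |
| +/* As a concrete reading of the test above: a 3-byte aggregate (size not 1, |
| +   not a halfword-aligned 2-byte struct, and not a multiple of 4) is padded |
| +   upward, while a 2-byte struct aligned to 2 bytes, a 4-byte struct, or |
| +   any scalar argument is padded downward. */ |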
| + |
| +/* |
| + Return a rtx used for the return value from a function call. |
| +*/ |
| +rtx |
| +avr32_function_value (tree type, tree func, bool outgoing ATTRIBUTE_UNUSED) |
| +{ |
| + if (avr32_return_in_memory (type, func)) |
| + return NULL_RTX; |
| + |
| + if (int_size_in_bytes (type) <= 4) |
| + if (avr32_return_in_msb (type)) |
| +      /* Aggregates of size less than a word that align the data in the |
| + MSB must use SImode for r12. */ |
| + return gen_rtx_REG (SImode, RET_REGISTER); |
| + else |
| + return gen_rtx_REG (TYPE_MODE (type), RET_REGISTER); |
| + else if (int_size_in_bytes (type) <= 8) |
| + return gen_rtx_REG (TYPE_MODE (type), INTERNAL_REGNUM (11)); |
| + |
| + return NULL_RTX; |
| +} |
| + |
| +/* |
| + Return a rtx used for the return value from a library function call. |
| +*/ |
| +rtx |
| +avr32_libcall_value (enum machine_mode mode) |
| +{ |
| + |
| + if (GET_MODE_SIZE (mode) <= 4) |
| + return gen_rtx_REG (mode, RET_REGISTER); |
| + else if (GET_MODE_SIZE (mode) <= 8) |
| + return gen_rtx_REG (mode, INTERNAL_REGNUM (11)); |
| + else |
| + return NULL_RTX; |
| +} |
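| + |
| +/* Summary of the two return-value hooks above: values of at most 4 bytes |
| +   come back in RET_REGISTER (r12 by the AVR32 convention), values of 5-8 |
| +   bytes in the register pair rooted at INTERNAL_REGNUM (11), i.e. r11:r10, |
| +   and anything larger, or any aggregate that must_pass_in_stack, is |
| +   returned in memory as decided by avr32_return_in_memory(). */ |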
| + |
| +/* Return TRUE if X references a SYMBOL_REF. */ |
| +int |
| +symbol_mentioned_p (rtx x) |
| +{ |
| + const char *fmt; |
| + int i; |
| + |
| + if (GET_CODE (x) == SYMBOL_REF) |
| + return 1; |
| + |
| + fmt = GET_RTX_FORMAT (GET_CODE (x)); |
| + |
| + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) |
| + { |
| + if (fmt[i] == 'E') |
| + { |
| + int j; |
| + |
| + for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
| + if (symbol_mentioned_p (XVECEXP (x, i, j))) |
| + return 1; |
| + } |
| + else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i))) |
| + return 1; |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +/* Return TRUE if X references a LABEL_REF. */ |
| +int |
| +label_mentioned_p (rtx x) |
| +{ |
| + const char *fmt; |
| + int i; |
| + |
| + if (GET_CODE (x) == LABEL_REF) |
| + return 1; |
| + |
| + fmt = GET_RTX_FORMAT (GET_CODE (x)); |
| + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) |
| + { |
| + if (fmt[i] == 'E') |
| + { |
| + int j; |
| + |
| + for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
| + if (label_mentioned_p (XVECEXP (x, i, j))) |
| + return 1; |
| + } |
| + else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i))) |
| + return 1; |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +/* Return TRUE if X contains a MEM expression. */ |
| +int |
| +mem_mentioned_p (rtx x) |
| +{ |
| + const char *fmt; |
| + int i; |
| + |
| + if (MEM_P (x)) |
| + return 1; |
| + |
| + fmt = GET_RTX_FORMAT (GET_CODE (x)); |
| + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) |
| + { |
| + if (fmt[i] == 'E') |
| + { |
| + int j; |
| + |
| + for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
| + if (mem_mentioned_p (XVECEXP (x, i, j))) |
| + return 1; |
| + } |
| + else if (fmt[i] == 'e' && mem_mentioned_p (XEXP (x, i))) |
| + return 1; |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +int |
| +avr32_legitimate_pic_operand_p (rtx x) |
| +{ |
| + |
| + /* We can't have const, this must be broken down to a symbol. */ |
| + if (GET_CODE (x) == CONST) |
| + return FALSE; |
| + |
| + /* Can't access symbols or labels via the constant pool either */ |
| + if ((GET_CODE (x) == SYMBOL_REF |
| + && CONSTANT_POOL_ADDRESS_P (x) |
| + && (symbol_mentioned_p (get_pool_constant (x)) |
| + || label_mentioned_p (get_pool_constant (x))))) |
| + return FALSE; |
| + |
| + return TRUE; |
| +} |
| + |
| + |
| +rtx |
| +legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED, |
| + rtx reg) |
| +{ |
| + |
| + if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF) |
| + { |
| + int subregs = 0; |
| + |
| + if (reg == 0) |
| + { |
| + if (no_new_pseudos) |
| + abort (); |
| + else |
| + reg = gen_reg_rtx (Pmode); |
| + |
| + subregs = 1; |
| + } |
| + |
| + emit_move_insn (reg, orig); |
| + |
| + /* Only set current function as using pic offset table if flag_pic is |
| + set. This is because this function is also used if |
| + TARGET_HAS_ASM_ADDR_PSEUDOS is set. */ |
| + if (flag_pic) |
| + current_function_uses_pic_offset_table = 1; |
| + |
| + /* Put a REG_EQUAL note on this insn, so that it can be optimized by |
| + loop. */ |
| + return reg; |
| + } |
| + else if (GET_CODE (orig) == CONST) |
| + { |
| + rtx base, offset; |
| + |
| + if (flag_pic |
| + && GET_CODE (XEXP (orig, 0)) == PLUS |
| + && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx) |
| + return orig; |
| + |
| + if (reg == 0) |
| + { |
| + if (no_new_pseudos) |
| + abort (); |
| + else |
| + reg = gen_reg_rtx (Pmode); |
| + } |
| + |
| + if (GET_CODE (XEXP (orig, 0)) == PLUS) |
| + { |
| + base = |
| + legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg); |
| + offset = |
| + legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode, |
| + base == reg ? 0 : reg); |
| + } |
| + else |
| + abort (); |
| + |
| + if (GET_CODE (offset) == CONST_INT) |
| + { |
| + /* The base register doesn't really matter, we only want to test |
| + the index for the appropriate mode. */ |
| + if (!avr32_const_ok_for_constraint_p (INTVAL (offset), 'I', "Is21")) |
| + { |
| + if (!no_new_pseudos) |
| + offset = force_reg (Pmode, offset); |
| + else |
| + abort (); |
| + } |
| + |
| + if (GET_CODE (offset) == CONST_INT) |
| + return plus_constant (base, INTVAL (offset)); |
| + } |
| + |
| + return gen_rtx_PLUS (Pmode, base, offset); |
| + } |
| + |
| + return orig; |
| +} |
| + |
| +/* Generate code to load the PIC register. */ |
| +void |
| +avr32_load_pic_register (void) |
| +{ |
| + rtx l1, pic_tmp; |
| + rtx global_offset_table; |
| + |
| + if ((current_function_uses_pic_offset_table == 0) || TARGET_NO_INIT_GOT) |
| + return; |
| + |
| + if (!flag_pic) |
| + abort (); |
| + |
| + l1 = gen_label_rtx (); |
| + |
| + global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_"); |
| + pic_tmp = |
| + gen_rtx_CONST (Pmode, |
| + gen_rtx_MINUS (SImode, gen_rtx_LABEL_REF (Pmode, l1), |
| + global_offset_table)); |
| + emit_insn (gen_pic_load_addr |
| + (pic_offset_table_rtx, force_const_mem (SImode, pic_tmp))); |
| + emit_insn (gen_pic_compute_got_from_pc (pic_offset_table_rtx, l1)); |
| + |
| + /* Need to emit this whether or not we obey regdecls, since setjmp/longjmp |
| + can cause life info to screw up. */ |
| + emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx)); |
| +} |
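| + |
| +/* In other words (a sketch of the RTL emitted above, not literal assembly): |
| +   the PIC register is first loaded from the constant pool with the value |
| +   (L1 - _GLOBAL_OFFSET_TABLE_), and pic_compute_got_from_pc then subtracts |
| +   that value from the pc taken at L1, leaving the GOT address in the PIC |
| +   register. The trailing USE keeps the PIC register live for the dataflow |
| +   passes. */ |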
| + |
| + |
| + |
| +/* This hook should return true if values of type type are returned at the most |
| + significant end of a register (in other words, if they are padded at the |
| + least significant end). You can assume that type is returned in a register; |
| + the caller is required to check this. Note that the register provided by |
| + FUNCTION_VALUE must be able to hold the complete return value. For example, |
| + if a 1-, 2- or 3-byte structure is returned at the most significant end of a |
| + 4-byte register, FUNCTION_VALUE should provide an SImode rtx. */ |
| +bool |
| +avr32_return_in_msb (tree type ATTRIBUTE_UNUSED) |
| +{ |
| + /* if ( AGGREGATE_TYPE_P (type) ) if ((int_size_in_bytes(type) == 1) || |
| + ((int_size_in_bytes(type) == 2) && TYPE_ALIGN_UNIT(type) >= 2)) return |
| + false; else return true; */ |
| + |
| + return false; |
| +} |
| + |
| + |
| +/* |
| + Returns one if a certain function value is going to be returned in memory |
| + and zero if it is going to be returned in a register. |
| + |
| +  BLKmode and all other modes that are larger than 64 bits are returned in |
| + memory. |
| +*/ |
| +bool |
| +avr32_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED) |
| +{ |
| + if (TYPE_MODE (type) == VOIDmode) |
| + return false; |
| + |
| + if (int_size_in_bytes (type) > (2 * UNITS_PER_WORD) |
| + || int_size_in_bytes (type) == -1) |
| + { |
| + return true; |
| + } |
| + |
| + /* If we have an aggregate then use the same mechanism as when checking if |
| + it should be passed on the stack. */ |
| + if (type |
| + && AGGREGATE_TYPE_P (type) |
| + && (*targetm.calls.must_pass_in_stack) (TYPE_MODE (type), type)) |
| + return true; |
| + |
| + return false; |
| +} |
| + |
| + |
| +/* Output the constant part of the trampoline. |
| + lddpc r0, pc[0x8:e] ; load static chain register |
| +   lddpc pc, pc[0x8:e] ; jump to subroutine |
| + .long 0 ; Address to static chain, |
| + ; filled in by avr32_initialize_trampoline() |
| +   .long 0 ; Address to subroutine, |
| + ; filled in by avr32_initialize_trampoline() |
| +*/ |
| +void |
| +avr32_trampoline_template (FILE * file) |
| +{ |
| + fprintf (file, "\tlddpc r0, pc[8]\n"); |
| + fprintf (file, "\tlddpc pc, pc[8]\n"); |
| + /* make room for the address of the static chain. */ |
| + fprintf (file, "\t.long\t0\n"); |
| +  /* make room for the address of the subroutine. */ |
| + fprintf (file, "\t.long\t0\n"); |
| +} |
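| + |
| +/* Note that avr32_initialize_trampoline() below fills in the two .long |
| +   slots (the static chain at TRAMPOLINE_SIZE - 4 and the target function |
| +   address at TRAMPOLINE_SIZE) and then invalidates the icache so the |
| +   freshly written trampoline can be fetched. */ |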
| + |
| + |
| +/* |
| + Initialize the variable parts of a trampoline. |
| +*/ |
| +void |
| +avr32_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain) |
| +{ |
| + /* Store the address to the static chain. */ |
| + emit_move_insn (gen_rtx_MEM |
| + (SImode, plus_constant (addr, TRAMPOLINE_SIZE - 4)), |
| + static_chain); |
| + |
| + /* Store the address to the function. */ |
| + emit_move_insn (gen_rtx_MEM (SImode, plus_constant (addr, TRAMPOLINE_SIZE)), |
| + fnaddr); |
| + |
| + emit_insn (gen_cache (gen_rtx_REG (SImode, 13), |
| + gen_rtx_CONST_INT (SImode, |
| + AVR32_CACHE_INVALIDATE_ICACHE))); |
| +} |
| + |
| +/* Return nonzero if X is valid as an addressing register. */ |
| +int |
| +avr32_address_register_rtx_p (rtx x, int strict_p) |
| +{ |
| + int regno; |
| + |
| + if (!register_operand(x, GET_MODE(x))) |
| + return 0; |
| + |
| + /* If strict we require the register to be a hard register. */ |
| + if (strict_p |
| + && !REG_P(x)) |
| + return 0; |
| + |
| + regno = REGNO (x); |
| + |
| + if (strict_p) |
| + return REGNO_OK_FOR_BASE_P (regno); |
| + |
| + return (regno <= LAST_REGNUM || regno >= FIRST_PSEUDO_REGISTER); |
| +} |
| + |
| +/* Return nonzero if INDEX is valid for an address index operand. */ |
| +int |
| +avr32_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p) |
| +{ |
| + enum rtx_code code = GET_CODE (index); |
| + |
| + if (GET_MODE_SIZE (mode) > 8) |
| + return 0; |
| + |
| + /* Standard coprocessor addressing modes. */ |
| + if (code == CONST_INT) |
| + { |
| + if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT) |
| +        /* Coprocessor mem insns have a smaller reach than ordinary mem insns */ |
| + return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ku14"); |
| + else |
| + return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ks16"); |
| + } |
| + |
| + if (avr32_address_register_rtx_p (index, strict_p)) |
| + return 1; |
| + |
| + if (code == MULT) |
| + { |
| + rtx xiop0 = XEXP (index, 0); |
| + rtx xiop1 = XEXP (index, 1); |
| + return ((avr32_address_register_rtx_p (xiop0, strict_p) |
| + && power_of_two_operand (xiop1, SImode) |
| + && (INTVAL (xiop1) <= 8)) |
| + || (avr32_address_register_rtx_p (xiop1, strict_p) |
| + && power_of_two_operand (xiop0, SImode) |
| + && (INTVAL (xiop0) <= 8))); |
| + } |
| + else if (code == ASHIFT) |
| + { |
| + rtx op = XEXP (index, 1); |
| + |
| + return (avr32_address_register_rtx_p (XEXP (index, 0), strict_p) |
| + && GET_CODE (op) == CONST_INT |
| + && INTVAL (op) > 0 && INTVAL (op) <= 3); |
| + } |
| + |
| + return 0; |
| +} |
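| + |
| +/* Examples of index operands accepted above (illustrative only): a signed |
| +   16-bit constant displacement such as r7[-4] (Ks16, or Ku14 for |
| +   coprocessor float accesses), a plain register index r7[r8], or a scaled |
| +   register index r7[r8 << 2], written either as (ashift reg 1..3) or as |
| +   (mult reg 2/4/8). Scales above 8 and accesses wider than 8 bytes are |
| +   rejected. */ |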
| + |
| +/* |
| + Used in the GO_IF_LEGITIMATE_ADDRESS macro. Returns a nonzero value if |
| + the RTX x is a legitimate memory address. |
| + |
| +  Returns NO_REGS if the address is not legitimate, GENERAL_REGS or ALL_REGS |
| + if it is. |
| +*/ |
| + |
| +/* Forward declaration*/ |
| +int is_minipool_label (rtx label); |
| + |
| +int |
| +avr32_legitimate_address (enum machine_mode mode, rtx x, int strict) |
| +{ |
| + |
| + switch (GET_CODE (x)) |
| + { |
| + case REG: |
| + return avr32_address_register_rtx_p (x, strict); |
| + case CONST_INT: |
| + return ((mode==SImode) |
| + && CONST_OK_FOR_CONSTRAINT_P(INTVAL(x), 'K', "Ks17")); |
| + case CONST: |
| + { |
| + rtx label = avr32_find_symbol (x); |
| + if (label |
| + && |
| + ((CONSTANT_POOL_ADDRESS_P (label) |
| + && !(flag_pic |
| + && (symbol_mentioned_p (get_pool_constant (label)) |
| + || label_mentioned_p (get_pool_constant (label))))) |
| + /* TODO! Can this ever happen??? */ |
| + || ((GET_CODE (label) == LABEL_REF) |
| + && GET_CODE (XEXP (label, 0)) == CODE_LABEL |
| + && is_minipool_label (XEXP (label, 0))) |
| + /*|| ((GET_CODE (label) == SYMBOL_REF) |
| + && mode == SImode |
| + && SYMBOL_REF_RMW_ADDR(label))*/)) |
| + { |
| + return TRUE; |
| + } |
| + } |
| + break; |
| + case LABEL_REF: |
| + if (GET_CODE (XEXP (x, 0)) == CODE_LABEL |
| + && is_minipool_label (XEXP (x, 0))) |
| + { |
| + return TRUE; |
| + } |
| + break; |
| + case SYMBOL_REF: |
| + { |
| + if (CONSTANT_POOL_ADDRESS_P (x) |
| + && !(flag_pic |
| + && (symbol_mentioned_p (get_pool_constant (x)) |
| + || label_mentioned_p (get_pool_constant (x))))) |
| + return TRUE; |
| + else if (SYMBOL_REF_RCALL_FUNCTION_P (x) |
| + || (mode == SImode |
| + && SYMBOL_REF_RMW_ADDR (x))) |
| + return TRUE; |
| + break; |
| + } |
| + case PRE_DEC: /* (pre_dec (...)) */ |
| + case POST_INC: /* (post_inc (...)) */ |
| + return avr32_address_register_rtx_p (XEXP (x, 0), strict); |
| + case PLUS: /* (plus (...) (...)) */ |
| + { |
| + rtx xop0 = XEXP (x, 0); |
| + rtx xop1 = XEXP (x, 1); |
| + |
| + return ((avr32_address_register_rtx_p (xop0, strict) |
| + && avr32_legitimate_index_p (mode, xop1, strict)) |
| + || (avr32_address_register_rtx_p (xop1, strict) |
| + && avr32_legitimate_index_p (mode, xop0, strict))); |
| + } |
| + default: |
| + break; |
| + } |
| + |
| + return FALSE; |
| +} |
| + |
| + |
| +int |
| +avr32_const_ok_for_move (HOST_WIDE_INT c) |
| +{ |
| + if ( TARGET_V2_INSNS ) |
| + return ( avr32_const_ok_for_constraint_p (c, 'K', "Ks21") |
| + /* movh instruction */ |
| + || avr32_hi16_immediate_operand (GEN_INT(c), VOIDmode) ); |
| + else |
| + return avr32_const_ok_for_constraint_p (c, 'K', "Ks21"); |
| +} |
| + |
| +int |
| +avr32_const_double_immediate (rtx value) |
| +{ |
| + HOST_WIDE_INT hi, lo; |
| + |
| + if (GET_CODE (value) != CONST_DOUBLE) |
| + return FALSE; |
| + |
| + if (SCALAR_FLOAT_MODE_P (GET_MODE (value))) |
| + { |
| + HOST_WIDE_INT target_float[2]; |
| + hi = lo = 0; |
| + real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (value), |
| + GET_MODE (value)); |
| + lo = target_float[0]; |
| + hi = target_float[1]; |
| + } |
| + else |
| + { |
| + hi = CONST_DOUBLE_HIGH (value); |
| + lo = CONST_DOUBLE_LOW (value); |
| + } |
| + |
| + if (avr32_const_ok_for_constraint_p (lo, 'K', "Ks21") |
| + && (GET_MODE (value) == SFmode |
| + || avr32_const_ok_for_constraint_p (hi, 'K', "Ks21"))) |
| + { |
| + return TRUE; |
| + } |
| + |
| + return FALSE; |
| +} |
| + |
| + |
| +int |
| +avr32_legitimate_constant_p (rtx x) |
| +{ |
| + switch (GET_CODE (x)) |
| + { |
| + case CONST_INT: |
| +      /* Check if we should put large immediates into the constant pool |
| + or load them directly with mov/orh.*/ |
| + if (!avr32_imm_in_const_pool) |
| + return 1; |
| + |
| + return avr32_const_ok_for_move (INTVAL (x)); |
| + case CONST_DOUBLE: |
| +      /* Check if we should put large immediates into the constant pool |
| + or load them directly with mov/orh.*/ |
| + if (!avr32_imm_in_const_pool) |
| + return 1; |
| + |
| + if (GET_MODE (x) == SFmode |
| + || GET_MODE (x) == DFmode || GET_MODE (x) == DImode) |
| + return avr32_const_double_immediate (x); |
| + else |
| + return 0; |
| + case LABEL_REF: |
| + case SYMBOL_REF: |
| + return avr32_find_symbol (x) && (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS); |
| + case CONST: |
| + case HIGH: |
| + case CONST_VECTOR: |
| + return 0; |
| + default: |
| + printf ("%s():\n", __FUNCTION__); |
| + debug_rtx (x); |
| + return 1; |
| + } |
| +} |
| + |
| + |
| +/* Strip any special encoding from labels */ |
| +const char * |
| +avr32_strip_name_encoding (const char *name) |
| +{ |
| + const char *stripped = name; |
| + |
| + while (1) |
| + { |
| + switch (stripped[0]) |
| + { |
| + case '#': |
| + stripped = strchr (name + 1, '#') + 1; |
| + break; |
| + case '*': |
| + stripped = &stripped[1]; |
| + break; |
| + default: |
| + return stripped; |
| + } |
| + } |
| +} |
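| + |
| +/* For illustration (hypothetical names): "*foo" is returned verbatim as |
| +   "foo", and an encoded name of the form "#flags#bar" is stripped to |
| +   everything after the second '#', i.e. "bar". Plain names are returned |
| +   unchanged. */ |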
| + |
| + |
| + |
| +/* Do anything needed before RTL is emitted for each function. */ |
| +static struct machine_function * |
| +avr32_init_machine_status (void) |
| +{ |
| + struct machine_function *machine; |
| + machine = |
| + (machine_function *) ggc_alloc_cleared (sizeof (machine_function)); |
| + |
| +#if AVR32_FT_UNKNOWN != 0 |
| + machine->func_type = AVR32_FT_UNKNOWN; |
| +#endif |
| + |
| + machine->minipool_label_head = 0; |
| + machine->minipool_label_tail = 0; |
| + machine->ifcvt_after_reload = 0; |
| + return machine; |
| +} |
| + |
| +void |
| +avr32_init_expanders (void) |
| +{ |
| + /* Arrange to initialize and mark the machine per-function status. */ |
| + init_machine_status = avr32_init_machine_status; |
| +} |
| + |
| + |
| +/* Return an RTX indicating where the return address to the |
| + calling function can be found. */ |
| + |
| +rtx |
| +avr32_return_addr (int count, rtx frame ATTRIBUTE_UNUSED) |
| +{ |
| + if (count != 0) |
| + return NULL_RTX; |
| + |
| + return get_hard_reg_initial_val (Pmode, LR_REGNUM); |
| +} |
| + |
| + |
| +void |
| +avr32_encode_section_info (tree decl, rtx rtl, int first) |
| +{ |
| + default_encode_section_info(decl, rtl, first); |
| + |
| + if ( TREE_CODE (decl) == VAR_DECL |
| + && (GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF) |
| + && (lookup_attribute ("rmw_addressable", DECL_ATTRIBUTES (decl)) |
| + || TARGET_RMW_ADDRESSABLE_DATA) ){ |
| + if ( !TARGET_RMW || flag_pic ) |
| + return; |
| + SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT); |
| + } |
| +} |
| + |
| +void |
| +avr32_asm_output_label (FILE * stream, const char *name) |
| +{ |
| + name = avr32_strip_name_encoding (name); |
| + |
| + /* Print the label. */ |
| + assemble_name (stream, name); |
| + fprintf (stream, ":\n"); |
| +} |
| + |
| + |
| + |
| +void |
| +avr32_asm_weaken_label (FILE * stream, const char *name) |
| +{ |
| + fprintf (stream, "\t.weak "); |
| + assemble_name (stream, name); |
| + fprintf (stream, "\n"); |
| +} |
| + |
| +/* |
| + Checks if a labelref is equal to a reserved word in the assembler. If it is, |
| + insert a '_' before the label name. |
| +*/ |
| +void |
| +avr32_asm_output_labelref (FILE * stream, const char *name) |
| +{ |
| + int verbatim = FALSE; |
| + const char *stripped = name; |
| + int strip_finished = FALSE; |
| + |
| + while (!strip_finished) |
| + { |
| + switch (stripped[0]) |
| + { |
| + case '#': |
| + stripped = strchr (name + 1, '#') + 1; |
| + break; |
| + case '*': |
| + stripped = &stripped[1]; |
| + verbatim = TRUE; |
| + break; |
| + default: |
| + strip_finished = TRUE; |
| + break; |
| + } |
| + } |
| + |
| + if (verbatim) |
| + fputs (stripped, stream); |
| + else |
| + asm_fprintf (stream, "%U%s", stripped); |
| +} |
| + |
| + |
| + |
| +/* |
| + Check if the comparison in compare_exp is redundant |
| + for the condition given in next_cond given that the |
| + needed flags are already set by an earlier instruction. |
| + Uses cc_prev_status to check this. |
| + |
| + Returns NULL_RTX if the compare is not redundant |
| + or the new condition to use in the conditional |
| + instruction if the compare is redundant. |
| +*/ |
| +static rtx |
| +is_compare_redundant (rtx compare_exp, rtx next_cond) |
| +{ |
| + int z_flag_valid = FALSE; |
| + int n_flag_valid = FALSE; |
| + rtx new_cond; |
| + |
| + if (GET_CODE (compare_exp) != COMPARE |
| + && GET_CODE (compare_exp) != AND) |
| + return NULL_RTX; |
| + |
| + |
| + if (rtx_equal_p (cc_prev_status.mdep.value, compare_exp)) |
| + { |
| + /* cc0 already contains the correct comparison -> delete cmp insn */ |
| + return next_cond; |
| + } |
| + |
| + if (GET_MODE (compare_exp) != SImode) |
| + return NULL_RTX; |
| + |
| + switch (cc_prev_status.mdep.flags) |
| + { |
| + case CC_SET_VNCZ: |
| + case CC_SET_NCZ: |
| + n_flag_valid = TRUE; |
| + case CC_SET_CZ: |
| + case CC_SET_Z: |
| + z_flag_valid = TRUE; |
| + } |
| + |
| + if (cc_prev_status.mdep.value |
| + && GET_CODE (compare_exp) == COMPARE |
| + && REG_P (XEXP (compare_exp, 0)) |
| + && REGNO (XEXP (compare_exp, 0)) == REGNO (cc_prev_status.mdep.value) |
| + && GET_CODE (XEXP (compare_exp, 1)) == CONST_INT |
| + && next_cond != NULL_RTX) |
| + { |
| + if (INTVAL (XEXP (compare_exp, 1)) == 0 |
| + && z_flag_valid |
| + && (GET_CODE (next_cond) == EQ || GET_CODE (next_cond) == NE)) |
| +        /* We can skip the comparison since the Z flag already reflects ops[0] */ |
| + return next_cond; |
| + else if (n_flag_valid |
| + && ((INTVAL (XEXP (compare_exp, 1)) == 0 |
| + && (GET_CODE (next_cond) == GE |
| + || GET_CODE (next_cond) == LT)) |
| + || (INTVAL (XEXP (compare_exp, 1)) == -1 |
| + && (GET_CODE (next_cond) == GT |
| + || GET_CODE (next_cond) == LE)))) |
| + { |
| +          /* We can skip the comparison since the N flag already reflects ops[0], |
| + which means that we can use the mi/pl conditions to check if |
| + ops[0] is GE or LT 0. */ |
| + if ((GET_CODE (next_cond) == GE) || (GET_CODE (next_cond) == GT)) |
| + new_cond = |
| + gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx), |
| + UNSPEC_COND_PL); |
| + else |
| + new_cond = |
| + gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx), |
| + UNSPEC_COND_MI); |
| + return new_cond; |
| + } |
| + } |
| + return NULL_RTX; |
| +} |
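| + |
| +/* An illustrative case of the check above: when the previous insn had a cc |
| +   attribute of SET_NCZ with r10 as operand 0, a following "cmp r10, 0" |
| +   that only feeds an eq/ne or ge/lt condition is redundant; eq/ne can use |
| +   the Z flag directly, while ge/lt is rewritten into the pl/mi forms via |
| +   UNSPEC_COND_PL and UNSPEC_COND_MI. */ |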
| + |
| +/* Updates cc_status. */ |
| +void |
| +avr32_notice_update_cc (rtx exp, rtx insn) |
| +{ |
| + enum attr_cc attr_cc = get_attr_cc (insn); |
| + |
| + if ( attr_cc == CC_SET_Z_IF_NOT_V2 ) |
| + if (TARGET_V2_INSNS) |
| + attr_cc = CC_NONE; |
| + else |
| + attr_cc = CC_SET_Z; |
| + |
| + switch (attr_cc) |
| + { |
| + case CC_CALL_SET: |
| + CC_STATUS_INIT; |
| + FPCC_STATUS_INIT; |
| + /* Check if the function call returns a value in r12 */ |
| + if (REG_P (recog_data.operand[0]) |
| + && REGNO (recog_data.operand[0]) == RETVAL_REGNUM) |
| + { |
| + cc_status.flags = 0; |
| + cc_status.mdep.value = |
| + gen_rtx_COMPARE (SImode, recog_data.operand[0], const0_rtx); |
| + cc_status.mdep.flags = CC_SET_VNCZ; |
| + cc_status.mdep.cond_exec_cmp_clobbered = 0; |
| + |
| + } |
| + break; |
| + case CC_COMPARE: |
| + { |
| +        /* Check whether the compare will be optimized away; if so, nothing |
| +           should be done here. */ |
| + rtx compare_exp = SET_SRC (exp); |
| + /* Check if we have a tst expression. If so convert it to a |
| + compare with 0. */ |
| + if ( REG_P (SET_SRC (exp)) ) |
| + compare_exp = gen_rtx_COMPARE (GET_MODE (SET_SRC (exp)), |
| + SET_SRC (exp), |
| + const0_rtx); |
| + |
| + if (is_compare_redundant (compare_exp, get_next_insn_cond (insn)) == |
| + NULL_RTX) |
| + { |
| + |
| + /* Reset the nonstandard flag */ |
| + CC_STATUS_INIT; |
| + cc_status.flags = 0; |
| + cc_status.mdep.value = compare_exp; |
| + cc_status.mdep.flags = CC_SET_VNCZ; |
| + cc_status.mdep.cond_exec_cmp_clobbered = 0; |
| + } |
| + } |
| + break; |
| + case CC_CMP_COND_INSN: |
| + { |
| +        /* Conditional insn that emits the compare itself. */ |
| + rtx cmp; |
| + rtx cmp_op0, cmp_op1; |
| + rtx cond; |
| + rtx dest; |
| + rtx next_insn = next_nonnote_insn (insn); |
| + |
| + if ( GET_CODE (exp) == COND_EXEC ) |
| + { |
| + cmp_op0 = XEXP (COND_EXEC_TEST (exp), 0); |
| + cmp_op1 = XEXP (COND_EXEC_TEST (exp), 1); |
| + cond = COND_EXEC_TEST (exp); |
| + dest = SET_DEST (COND_EXEC_CODE (exp)); |
| + } |
| + else |
| + { |
| + /* If then else conditional. compare operands are in operands |
| + 4 and 5. */ |
| + cmp_op0 = recog_data.operand[4]; |
| + cmp_op1 = recog_data.operand[5]; |
| + cond = recog_data.operand[1]; |
| + dest = SET_DEST (exp); |
| + } |
| + |
| + if ( GET_CODE (cmp_op0) == AND ) |
| + cmp = cmp_op0; |
| + else |
| + cmp = gen_rtx_COMPARE (GET_MODE (cmp_op0), |
| + cmp_op0, |
| + cmp_op1); |
| + |
| +        /* Check if the conditional insn updates a register present |
| + in the comparison, if so then we must reset the cc_status. */ |
| + if (REG_P (dest) |
| + && (reg_mentioned_p (dest, cmp_op0) |
| + || reg_mentioned_p (dest, cmp_op1)) |
| + && GET_CODE (exp) != COND_EXEC ) |
| + { |
| + CC_STATUS_INIT; |
| + } |
| + else if (is_compare_redundant (cmp, cond) == NULL_RTX) |
| + { |
| + /* Reset the nonstandard flag */ |
| + CC_STATUS_INIT; |
| + if ( GET_CODE (cmp_op0) == AND ) |
| + { |
| + cc_status.flags = CC_INVERTED; |
| + cc_status.mdep.flags = CC_SET_Z; |
| + } |
| + else |
| + { |
| + cc_status.flags = 0; |
| + cc_status.mdep.flags = CC_SET_VNCZ; |
| + } |
| + cc_status.mdep.value = cmp; |
| + cc_status.mdep.cond_exec_cmp_clobbered = 0; |
| + } |
| + |
| + |
| + /* Check if we have a COND_EXEC insn which updates one |
| + of the registers in the compare status. */ |
| + if (REG_P (dest) |
| + && (reg_mentioned_p (dest, cmp_op0) |
| + || reg_mentioned_p (dest, cmp_op1)) |
| + && GET_CODE (exp) == COND_EXEC ) |
| + cc_status.mdep.cond_exec_cmp_clobbered = 1; |
| + |
| + if ( cc_status.mdep.cond_exec_cmp_clobbered |
| + && GET_CODE (exp) == COND_EXEC |
| + && next_insn != NULL |
| + && INSN_P (next_insn) |
| + && !(GET_CODE (PATTERN (next_insn)) == COND_EXEC |
| + && rtx_equal_p (XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 0), cmp_op0) |
| + && rtx_equal_p (XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 1), cmp_op1) |
| + && (GET_CODE (COND_EXEC_TEST (PATTERN (next_insn))) == GET_CODE (cond) |
| + || GET_CODE (COND_EXEC_TEST (PATTERN (next_insn))) == reverse_condition (GET_CODE (cond)))) ) |
| + { |
| + /* We have a sequence of conditional insns where the compare status has been clobbered |
| + since the compare no longer reflects the content of the values to compare. */ |
| + CC_STATUS_INIT; |
| + cc_status.mdep.cond_exec_cmp_clobbered = 1; |
| + } |
| + |
| + } |
| + break; |
| + case CC_FPCOMPARE: |
| +    /* Check whether the floating-point compare will be optimized away; if |
| +       so, nothing should be done. */ |
| + if (!rtx_equal_p (cc_prev_status.mdep.fpvalue, SET_SRC (exp))) |
| + { |
| + /* cc0 already contains the correct comparison -> delete cmp insn */ |
| + /* Reset the nonstandard flag */ |
| + cc_status.mdep.fpvalue = SET_SRC (exp); |
| + cc_status.mdep.fpflags = CC_SET_CZ; |
| + } |
| + break; |
| + case CC_FROM_FPCC: |
| + /* Flags are updated with flags from Floating-point coprocessor, set |
| + CC_NOT_SIGNED flag since the flags are set so that unsigned |
| +       condition codes can be used directly. */ |
| + CC_STATUS_INIT; |
| + cc_status.flags = CC_NOT_SIGNED; |
| + cc_status.mdep.value = cc_status.mdep.fpvalue; |
| + cc_status.mdep.flags = cc_status.mdep.fpflags; |
| + break; |
| + case CC_BLD: |
| + /* Bit load is kind of like an inverted testsi, because the Z flag is |
| + inverted */ |
| + CC_STATUS_INIT; |
| + cc_status.flags = CC_INVERTED; |
| + cc_status.mdep.value = SET_SRC (exp); |
| + cc_status.mdep.flags = CC_SET_Z; |
| + cc_status.mdep.cond_exec_cmp_clobbered = 0; |
| + break; |
| + case CC_NONE: |
| + /* Insn does not affect CC at all. Check if the instruction updates |
| +       some of the registers currently reflected in cc0. */ |
| + |
| + if ((GET_CODE (exp) == SET) |
| + && (cc_status.value1 || cc_status.value2 || cc_status.mdep.value) |
| + && (reg_mentioned_p (SET_DEST (exp), cc_status.value1) |
| + || reg_mentioned_p (SET_DEST (exp), cc_status.value2) |
| + || reg_mentioned_p (SET_DEST (exp), cc_status.mdep.value))) |
| + { |
| + CC_STATUS_INIT; |
| + } |
| + |
| + /* If this is a parallel we must step through each of the parallel |
| + expressions */ |
| + if (GET_CODE (exp) == PARALLEL) |
| + { |
| + int i; |
| + for (i = 0; i < XVECLEN (exp, 0); ++i) |
| + { |
| + rtx vec_exp = XVECEXP (exp, 0, i); |
| + if ((GET_CODE (vec_exp) == SET) |
| + && (cc_status.value1 || cc_status.value2 |
| + || cc_status.mdep.value) |
| + && (reg_mentioned_p (SET_DEST (vec_exp), cc_status.value1) |
| + || reg_mentioned_p (SET_DEST (vec_exp), |
| + cc_status.value2) |
| + || reg_mentioned_p (SET_DEST (vec_exp), |
| + cc_status.mdep.value))) |
| + { |
| + CC_STATUS_INIT; |
| + } |
| + } |
| + } |
| + |
| +    /* Check if we have memory operations with post_inc or pre_dec on the |
| + register currently reflected in cc0 */ |
| + if (GET_CODE (exp) == SET |
| + && GET_CODE (SET_SRC (exp)) == MEM |
| + && (GET_CODE (XEXP (SET_SRC (exp), 0)) == POST_INC |
| + || GET_CODE (XEXP (SET_SRC (exp), 0)) == PRE_DEC) |
| + && |
| + (reg_mentioned_p |
| + (XEXP (XEXP (SET_SRC (exp), 0), 0), cc_status.value1) |
| + || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0), |
| + cc_status.value2) |
| + || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0), |
| + cc_status.mdep.value))) |
| + CC_STATUS_INIT; |
| + |
| + if (GET_CODE (exp) == SET |
| + && GET_CODE (SET_DEST (exp)) == MEM |
| + && (GET_CODE (XEXP (SET_DEST (exp), 0)) == POST_INC |
| + || GET_CODE (XEXP (SET_DEST (exp), 0)) == PRE_DEC) |
| + && |
| + (reg_mentioned_p |
| + (XEXP (XEXP (SET_DEST (exp), 0), 0), cc_status.value1) |
| + || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0), |
| + cc_status.value2) |
| + || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0), |
| + cc_status.mdep.value))) |
| + CC_STATUS_INIT; |
| + break; |
| + |
| + case CC_SET_VNCZ: |
| + CC_STATUS_INIT; |
| + cc_status.mdep.value = recog_data.operand[0]; |
| + cc_status.mdep.flags = CC_SET_VNCZ; |
| + cc_status.mdep.cond_exec_cmp_clobbered = 0; |
| + break; |
| + |
| + case CC_SET_NCZ: |
| + CC_STATUS_INIT; |
| + cc_status.mdep.value = recog_data.operand[0]; |
| + cc_status.mdep.flags = CC_SET_NCZ; |
| + cc_status.mdep.cond_exec_cmp_clobbered = 0; |
| + break; |
| + |
| + case CC_SET_CZ: |
| + CC_STATUS_INIT; |
| + cc_status.mdep.value = recog_data.operand[0]; |
| + cc_status.mdep.flags = CC_SET_CZ; |
| + cc_status.mdep.cond_exec_cmp_clobbered = 0; |
| + break; |
| + |
| + case CC_SET_Z: |
| + CC_STATUS_INIT; |
| + cc_status.mdep.value = recog_data.operand[0]; |
| + cc_status.mdep.flags = CC_SET_Z; |
| + cc_status.mdep.cond_exec_cmp_clobbered = 0; |
| + break; |
| + |
| + case CC_CLOBBER: |
| + CC_STATUS_INIT; |
| + break; |
| + |
| + default: |
| + CC_STATUS_INIT; |
| + } |
| +} |
| + |
| + |
| +/* |
| + Outputs to stdio stream stream the assembler syntax for an instruction |
| + operand x. x is an RTL expression. |
| +*/ |
| +void |
| +avr32_print_operand (FILE * stream, rtx x, int code) |
| +{ |
| + int error = 0; |
| + |
| + if ( code == '?' ) |
| + { |
| + /* Predicable instruction, print condition code */ |
| + |
| + /* If the insn should not be conditional then do nothing. */ |
| + if ( current_insn_predicate == NULL_RTX ) |
| + return; |
| + |
| + /* Set x to the predicate to force printing |
| + the condition later on. */ |
| + x = current_insn_predicate; |
| + |
| +      /* Reverse the condition if using the bld insn. */ |
| + if ( GET_CODE (XEXP(current_insn_predicate,0)) == AND ) |
| + x = reversed_condition (current_insn_predicate); |
| + } |
| + else if ( code == '!' ) |
| + { |
| + /* Output compare for conditional insn if needed. */ |
| + rtx new_cond; |
| + gcc_assert ( current_insn_predicate != NULL_RTX ); |
| + new_cond = avr32_output_cmp(current_insn_predicate, |
| + GET_MODE(XEXP(current_insn_predicate,0)), |
| + XEXP(current_insn_predicate,0), |
| + XEXP(current_insn_predicate,1)); |
| + |
| + /* Check if the new condition is a special avr32 condition |
| + specified using UNSPECs. If so we must handle it differently. */ |
| + if ( GET_CODE (new_cond) == UNSPEC ) |
| + { |
| + current_insn_predicate = |
| + gen_rtx_UNSPEC (CCmode, |
| + gen_rtvec (2, |
| + XEXP(current_insn_predicate,0), |
| + XEXP(current_insn_predicate,1)), |
| + XINT (new_cond, 1)); |
| + } |
| + else |
| + { |
| + PUT_CODE(current_insn_predicate, GET_CODE(new_cond)); |
| + } |
| + return; |
| + } |
| + |
| + switch (GET_CODE (x)) |
| + { |
| + case UNSPEC: |
| + switch (XINT (x, 1)) |
| + { |
| + case UNSPEC_COND_PL: |
| + if (code == 'i') |
| + fputs ("mi", stream); |
| + else |
| + fputs ("pl", stream); |
| + break; |
| + case UNSPEC_COND_MI: |
| + if (code == 'i') |
| + fputs ("pl", stream); |
| + else |
| + fputs ("mi", stream); |
| + break; |
| + default: |
| + error = 1; |
| + } |
| + break; |
| + case EQ: |
| + if (code == 'i') |
| + fputs ("ne", stream); |
| + else |
| + fputs ("eq", stream); |
| + break; |
| + case NE: |
| + if (code == 'i') |
| + fputs ("eq", stream); |
| + else |
| + fputs ("ne", stream); |
| + break; |
| + case GT: |
| + if (code == 'i') |
| + fputs ("le", stream); |
| + else |
| + fputs ("gt", stream); |
| + break; |
| + case GTU: |
| + if (code == 'i') |
| + fputs ("ls", stream); |
| + else |
| + fputs ("hi", stream); |
| + break; |
| + case LT: |
| + if (code == 'i') |
| + fputs ("ge", stream); |
| + else |
| + fputs ("lt", stream); |
| + break; |
| + case LTU: |
| + if (code == 'i') |
| + fputs ("hs", stream); |
| + else |
| + fputs ("lo", stream); |
| + break; |
| + case GE: |
| + if (code == 'i') |
| + fputs ("lt", stream); |
| + else |
| + fputs ("ge", stream); |
| + break; |
| + case GEU: |
| + if (code == 'i') |
| + fputs ("lo", stream); |
| + else |
| + fputs ("hs", stream); |
| + break; |
| + case LE: |
| + if (code == 'i') |
| + fputs ("gt", stream); |
| + else |
| + fputs ("le", stream); |
| + break; |
| + case LEU: |
| + if (code == 'i') |
| + fputs ("hi", stream); |
| + else |
| + fputs ("ls", stream); |
| + break; |
| + case CONST_INT: |
| + { |
| + HOST_WIDE_INT value = INTVAL (x); |
| + |
| + switch (code) |
| + { |
| + case 'm': |
| + if ( HOST_BITS_PER_WIDE_INT > BITS_PER_WORD ) |
| + { |
| + /* A const_int can be used to represent DImode constants. */ |
| + value >>= BITS_PER_WORD; |
| + } |
| + /* We might get a const_int immediate for setting a DI register, |
| +               we must then return the correctly sign-extended DI. The most |
| + significant word is just a sign extension. */ |
| + else if (value < 0) |
| + value = -1; |
| + else |
| + value = 0; |
| + break; |
| + case 'i': |
| + value++; |
| + break; |
| + case 'p': |
| + { |
| + /* Set to bit position of first bit set in immediate */ |
| + int i, bitpos = 32; |
| + for (i = 0; i < 32; i++) |
| + if (value & (1 << i)) |
| + { |
| + bitpos = i; |
| + break; |
| + } |
| + value = bitpos; |
| + } |
| + break; |
| + case 'z': |
| + { |
| + /* Set to bit position of first bit cleared in immediate */ |
| + int i, bitpos = 32; |
| + for (i = 0; i < 32; i++) |
| + if (!(value & (1 << i))) |
| + { |
| + bitpos = i; |
| + break; |
| + } |
| + value = bitpos; |
| + } |
| + break; |
| + case 'r': |
| + { |
| + /* Reglist 8 */ |
| + char op[50]; |
| + op[0] = '\0'; |
| + |
| + if (value & 0x01) |
| + sprintf (op, "r0-r3"); |
| + if (value & 0x02) |
| + strlen (op) ? sprintf (op, "%s, r4-r7", op) : sprintf (op, |
| + "r4-r7"); |
| + if (value & 0x04) |
| + strlen (op) ? sprintf (op, "%s, r8-r9", op) : sprintf (op, |
| + "r8-r9"); |
| + if (value & 0x08) |
| + strlen (op) ? sprintf (op, "%s, r10", op) : sprintf (op, |
| + "r10"); |
| + if (value & 0x10) |
| + strlen (op) ? sprintf (op, "%s, r11", op) : sprintf (op, |
| + "r11"); |
| + if (value & 0x20) |
| + strlen (op) ? sprintf (op, "%s, r12", op) : sprintf (op, |
| + "r12"); |
| + if (value & 0x40) |
| + strlen (op) ? sprintf (op, "%s, lr", op) : sprintf (op, "lr"); |
| + if (value & 0x80) |
| + strlen (op) ? sprintf (op, "%s, pc", op) : sprintf (op, "pc"); |
| + |
| + fputs (op, stream); |
| + return; |
| + } |
| + case 's': |
| + { |
| + /* Reglist 16 */ |
| + char reglist16_string[100]; |
| + int i; |
| + reglist16_string[0] = '\0'; |
| + |
| + for (i = 0; i < 16; ++i) |
| + { |
| + if (value & (1 << i)) |
| + { |
| + strlen (reglist16_string) ? sprintf (reglist16_string, |
| + "%s, %s", |
| + reglist16_string, |
| + reg_names |
| + [INTERNAL_REGNUM |
| + (i)]) : |
| + sprintf (reglist16_string, "%s", |
| + reg_names[INTERNAL_REGNUM (i)]); |
| + } |
| + } |
| + fputs (reglist16_string, stream); |
| + return; |
| + } |
| + case 'C': |
| + { |
| + /* RegListCP8 */ |
| + char reglist_string[100]; |
| + avr32_make_fp_reglist_w (value, (char *) reglist_string); |
| + fputs (reglist_string, stream); |
| + return; |
| + } |
| + case 'D': |
| + { |
| + /* RegListCPD8 */ |
| + char reglist_string[100]; |
| + avr32_make_fp_reglist_d (value, (char *) reglist_string); |
| + fputs (reglist_string, stream); |
| + return; |
| + } |
| + case 'h': |
| + /* Print halfword part of word */ |
| + fputs (value ? "b" : "t", stream); |
| + return; |
| + } |
| + |
| + /* Print Value */ |
| + fprintf (stream, "%d", value); |
| + break; |
| + } |
| + case CONST_DOUBLE: |
| + { |
| + HOST_WIDE_INT hi, lo; |
| + if (SCALAR_FLOAT_MODE_P (GET_MODE (x))) |
| + { |
| + HOST_WIDE_INT target_float[2]; |
| + hi = lo = 0; |
| + real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (x), |
| + GET_MODE (x)); |
| + /* For doubles the most significant part starts at index 0. */ |
| + if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD) |
| + { |
| + hi = target_float[0]; |
| + lo = target_float[1]; |
| + } |
| + else |
| + { |
| + lo = target_float[0]; |
| + } |
| + } |
| + else |
| + { |
| + hi = CONST_DOUBLE_HIGH (x); |
| + lo = CONST_DOUBLE_LOW (x); |
| + } |
| + |
| + if (code == 'm') |
| + fprintf (stream, "%ld", hi); |
| + else |
| + fprintf (stream, "%ld", lo); |
| + |
| + break; |
| + } |
| + case CONST: |
| + output_addr_const (stream, XEXP (XEXP (x, 0), 0)); |
| + fprintf (stream, "+%ld", INTVAL (XEXP (XEXP (x, 0), 1))); |
| + break; |
| + case REG: |
| + /* Swap register name if the register is DImode or DFmode. */ |
| + if (GET_MODE (x) == DImode || GET_MODE (x) == DFmode) |
| + { |
| + /* Double register must have an even numbered address */ |
| + gcc_assert (!(REGNO (x) % 2)); |
| + if (code == 'm') |
| + fputs (reg_names[true_regnum (x)], stream); |
| + else |
| + fputs (reg_names[true_regnum (x) + 1], stream); |
| + } |
| + else if (GET_MODE (x) == TImode) |
| + { |
| + switch (code) |
| + { |
| + case 'T': |
| + fputs (reg_names[true_regnum (x)], stream); |
| + break; |
| + case 'U': |
| + fputs (reg_names[true_regnum (x) + 1], stream); |
| + break; |
| + case 'L': |
| + fputs (reg_names[true_regnum (x) + 2], stream); |
| + break; |
| + case 'B': |
| + fputs (reg_names[true_regnum (x) + 3], stream); |
| + break; |
| + default: |
| + fprintf (stream, "%s, %s, %s, %s", |
| + reg_names[true_regnum (x) + 3], |
| + reg_names[true_regnum (x) + 2], |
| + reg_names[true_regnum (x) + 1], |
| + reg_names[true_regnum (x)]); |
| + break; |
| + } |
| + } |
| + else |
| + { |
| + fputs (reg_names[true_regnum (x)], stream); |
| + } |
| + break; |
| + case CODE_LABEL: |
| + case LABEL_REF: |
| + case SYMBOL_REF: |
| + output_addr_const (stream, x); |
| + break; |
| + case MEM: |
| + switch (GET_CODE (XEXP (x, 0))) |
| + { |
| + case LABEL_REF: |
| + case SYMBOL_REF: |
| + output_addr_const (stream, XEXP (x, 0)); |
| + break; |
| + case MEM: |
| + switch (GET_CODE (XEXP (XEXP (x, 0), 0))) |
| + { |
| + case SYMBOL_REF: |
| + output_addr_const (stream, XEXP (XEXP (x, 0), 0)); |
| + break; |
| + default: |
| + error = 1; |
| + break; |
| + } |
| + break; |
| + case REG: |
| + avr32_print_operand (stream, XEXP (x, 0), 0); |
| + if (code != 'p') |
| + fputs ("[0]", stream); |
| + break; |
| + case PRE_DEC: |
| + fputs ("--", stream); |
| + avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0); |
| + break; |
| + case POST_INC: |
| + avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0); |
| + fputs ("++", stream); |
| + break; |
| + case PLUS: |
| + { |
| + rtx op0 = XEXP (XEXP (x, 0), 0); |
| + rtx op1 = XEXP (XEXP (x, 0), 1); |
| + rtx base = NULL_RTX, offset = NULL_RTX; |
| + |
| + if (avr32_address_register_rtx_p (op0, 1)) |
| + { |
| + base = op0; |
| + offset = op1; |
| + } |
| + else if (avr32_address_register_rtx_p (op1, 1)) |
| + { |
| + /* Operands are switched. */ |
| + base = op1; |
| + offset = op0; |
| + } |
| + |
| + gcc_assert (base && offset |
| + && avr32_address_register_rtx_p (base, 1) |
| + && avr32_legitimate_index_p (GET_MODE (x), offset, |
| + 1)); |
| + |
| + avr32_print_operand (stream, base, 0); |
| + fputs ("[", stream); |
| + avr32_print_operand (stream, offset, 0); |
| + fputs ("]", stream); |
| + break; |
| + } |
| + case CONST: |
| + output_addr_const (stream, XEXP (XEXP (XEXP (x, 0), 0), 0)); |
| + fprintf (stream, " + %ld", |
| + INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))); |
| + break; |
| + case CONST_INT: |
| + avr32_print_operand (stream, XEXP (x, 0), 0); |
| + break; |
| + default: |
| + error = 1; |
| + } |
| + break; |
| + case MULT: |
| + { |
| + int value = INTVAL (XEXP (x, 1)); |
| + |
| + /* Convert immediate in multiplication into a shift immediate */ |
| + switch (value) |
| + { |
| + case 2: |
| + value = 1; |
| + break; |
| + case 4: |
| + value = 2; |
| + break; |
| + case 8: |
| + value = 3; |
| + break; |
| + default: |
| + value = 0; |
| + } |
| + fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))], |
| + value); |
| + break; |
| + } |
| + case ASHIFT: |
| + if (GET_CODE (XEXP (x, 1)) == CONST_INT) |
| + fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))], |
| + (int) INTVAL (XEXP (x, 1))); |
| + else if (REG_P (XEXP (x, 1))) |
| + fprintf (stream, "%s << %s", reg_names[true_regnum (XEXP (x, 0))], |
| + reg_names[true_regnum (XEXP (x, 1))]); |
| + else |
| + { |
| + error = 1; |
| + } |
| + break; |
| + case LSHIFTRT: |
| + if (GET_CODE (XEXP (x, 1)) == CONST_INT) |
| + fprintf (stream, "%s >> %i", reg_names[true_regnum (XEXP (x, 0))], |
| + (int) INTVAL (XEXP (x, 1))); |
| + else if (REG_P (XEXP (x, 1))) |
| + fprintf (stream, "%s >> %s", reg_names[true_regnum (XEXP (x, 0))], |
| + reg_names[true_regnum (XEXP (x, 1))]); |
| + else |
| + { |
| + error = 1; |
| + } |
| + fprintf (stream, ">>"); |
| + break; |
| + case PARALLEL: |
| + { |
| + /* Load store multiple */ |
| + int i; |
| + int count = XVECLEN (x, 0); |
| + int reglist16 = 0; |
| + char reglist16_string[100]; |
| + |
| + for (i = 0; i < count; ++i) |
| + { |
| + rtx vec_elm = XVECEXP (x, 0, i); |
| +          if (GET_CODE (vec_elm) != SET) |
| + { |
| + debug_rtx (vec_elm); |
| + internal_error ("Unknown element in parallel expression!"); |
| + } |
| +          if (GET_CODE (XEXP (vec_elm, 0)) == REG) |
| + { |
| + /* Load multiple */ |
| + reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 0))); |
| + } |
| + else |
| + { |
| + /* Store multiple */ |
| + reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 1))); |
| + } |
| + } |
| + |
| + avr32_make_reglist16 (reglist16, reglist16_string); |
| + fputs (reglist16_string, stream); |
| + |
| + break; |
| + } |
| + |
| + case PLUS: |
| + { |
| + rtx op0 = XEXP (x, 0); |
| + rtx op1 = XEXP (x, 1); |
| + rtx base = NULL_RTX, offset = NULL_RTX; |
| + |
| + if (avr32_address_register_rtx_p (op0, 1)) |
| + { |
| + base = op0; |
| + offset = op1; |
| + } |
| + else if (avr32_address_register_rtx_p (op1, 1)) |
| + { |
| + /* Operands are switched. */ |
| + base = op1; |
| + offset = op0; |
| + } |
| + |
| + gcc_assert (base && offset |
| + && avr32_address_register_rtx_p (base, 1) |
| + && avr32_legitimate_index_p (GET_MODE (x), offset, 1)); |
| + |
| + avr32_print_operand (stream, base, 0); |
| + fputs ("[", stream); |
| + avr32_print_operand (stream, offset, 0); |
| + fputs ("]", stream); |
| + break; |
| + } |
| + |
| + default: |
| + error = 1; |
| + } |
| + |
| + if (error) |
| + { |
| + debug_rtx (x); |
| + internal_error ("Illegal expression for avr32_print_operand"); |
| + } |
| +} |
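| + |
| +/* Quick reference for the operand modifiers handled above (as read from |
| +   the code): '?' prints the condition of a predicated insn (nothing when |
| +   unpredicated), '!' emits the compare a conditional insn still needs, |
| +   'i' prints the inverted condition, 'm' selects the most significant |
| +   word or register of a double-word value, 'h' prints the t/b halfword |
| +   selector, 'p' and 'z' print the position of the first set or cleared |
| +   bit, 'r' and 's' print reglist8 and reglist16 register lists, 'C' and |
| +   'D' the coprocessor register lists, and 'T'/'U'/'L'/'B' pick the four |
| +   words of a TImode register group. */ |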
| + |
| +rtx |
| +avr32_get_note_reg_equiv (rtx insn) |
| +{ |
| + rtx note; |
| + |
| + note = find_reg_note (insn, REG_EQUIV, NULL_RTX); |
| + |
| + if (note != NULL_RTX) |
| + return XEXP (note, 0); |
| + else |
| + return NULL_RTX; |
| +} |
| + |
| +/* |
| + Outputs to stdio stream stream the assembler syntax for an instruction |
| + operand that is a memory reference whose address is x. x is an RTL |
| + expression. |
| + |
| + ToDo: fixme. |
| +*/ |
| +void |
| +avr32_print_operand_address (FILE * stream, rtx x) |
| +{ |
| + fprintf (stream, "(%d) /* address */", REGNO (x)); |
| +} |
| + |
| +/* Return true if _GLOBAL_OFFSET_TABLE_ symbol is mentioned. */ |
| +bool |
| +avr32_got_mentioned_p (rtx addr) |
| +{ |
| + if (GET_CODE (addr) == MEM) |
| + addr = XEXP (addr, 0); |
| + while (GET_CODE (addr) == CONST) |
| + addr = XEXP (addr, 0); |
| + if (GET_CODE (addr) == SYMBOL_REF) |
| + { |
| + return streq (XSTR (addr, 0), "_GLOBAL_OFFSET_TABLE_"); |
| + } |
| + if (GET_CODE (addr) == PLUS || GET_CODE (addr) == MINUS) |
| + { |
| + bool l1, l2; |
| + |
| + l1 = avr32_got_mentioned_p (XEXP (addr, 0)); |
| + l2 = avr32_got_mentioned_p (XEXP (addr, 1)); |
| + return l1 || l2; |
| + } |
| + return false; |
| +} |
| + |
| + |
| +/* Find the symbol in an address expression. */ |
| + |
| +rtx |
| +avr32_find_symbol (rtx addr) |
| +{ |
| + if (GET_CODE (addr) == MEM) |
| + addr = XEXP (addr, 0); |
| + |
| + while (GET_CODE (addr) == CONST) |
| + addr = XEXP (addr, 0); |
| + |
| + if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF) |
| + return addr; |
| + if (GET_CODE (addr) == PLUS) |
| + { |
| + rtx l1, l2; |
| + |
| + l1 = avr32_find_symbol (XEXP (addr, 0)); |
| + l2 = avr32_find_symbol (XEXP (addr, 1)); |
| + if (l1 != NULL_RTX && l2 == NULL_RTX) |
| + return l1; |
| + else if (l1 == NULL_RTX && l2 != NULL_RTX) |
| + return l2; |
| + } |
| + |
| + return NULL_RTX; |
| +} |
| + |
| + |
| +/* Routines for manipulation of the constant pool. */ |
| + |
| +/* AVR32 instructions cannot load a large constant directly into a |
| + register; they have to come from a pc relative load. The constant |
| + must therefore be placed in the addressable range of the pc |
| + relative load. Depending on the precise pc relative load |
| + instruction the range is somewhere between 256 bytes and 4k. This |
| + means that we often have to dump a constant inside a function, and |
| + generate code to branch around it. |
| + |
| + It is important to minimize this, since the branches will slow |
| + things down and make the code larger. |
| + |
| + Normally we can hide the table after an existing unconditional |
| + branch so that there is no interruption of the flow, but in the |
| + worst case the code looks like this: |
| + |
| + lddpc rn, L1 |
| + ... |
| + rjmp L2 |
| + align |
| + L1: .long value |
| + L2: |
| + ... |
| + |
| + lddpc rn, L3 |
| + ... |
| + rjmp L4 |
| + align |
| + L3: .long value |
| + L4: |
| + ... |
| + |
| + We fix this by performing a scan after scheduling, which notices |
| + which instructions need to have their operands fetched from the |
| + constant table and builds the table. |
| + |
| + The algorithm starts by building a table of all the constants that |
| + need fixing up and all the natural barriers in the function (places |
| + where a constant table can be dropped without breaking the flow). |
| + For each fixup we note how far the pc-relative replacement will be |
| + able to reach and the offset of the instruction into the function. |
| + |
| + Having built the table we then group the fixes together to form |
| + tables that are as large as possible (subject to addressing |
| + constraints) and emit each table of constants after the last |
| + barrier that is within range of all the instructions in the group. |
| + If a group does not contain a barrier, then we forcibly create one |
| + by inserting a jump instruction into the flow. Once the table has |
| + been inserted, the insns are then modified to reference the |
| + relevant entry in the pool. |
| + |
| + Possible enhancements to the algorithm (not implemented) are: |
| + |
| + 1) For some processors and object formats, there may be benefit in |
| + aligning the pools to the start of cache lines; this alignment |
| + would need to be taken into account when calculating addressability |
| + of a pool. */ |
| + |
| +/* These typedefs are located at the start of this file, so that |
| + they can be used in the prototypes there. This comment is to |
| + remind readers of that fact so that the following structures |
| + can be understood more easily. |
| + |
| + typedef struct minipool_node Mnode; |
| + typedef struct minipool_fixup Mfix; */ |
| + |
| +struct minipool_node |
| +{ |
| + /* Doubly linked chain of entries. */ |
| + Mnode *next; |
| + Mnode *prev; |
| + /* The maximum offset into the code that this entry can be placed. While |
| + pushing fixes for forward references, all entries are sorted in order of |
| + increasing max_address. */ |
| + HOST_WIDE_INT max_address; |
| + /* Similarly for an entry inserted for a backwards ref. */ |
| + HOST_WIDE_INT min_address; |
| + /* The number of fixes referencing this entry. This can become zero if we |
| + "unpush" an entry. In this case we ignore the entry when we come to |
| + emit the code. */ |
| + int refcount; |
| + /* The offset from the start of the minipool. */ |
| + HOST_WIDE_INT offset; |
| + /* The value in table. */ |
| + rtx value; |
| + /* The mode of value. */ |
| + enum machine_mode mode; |
| + /* The size of the value. */ |
| + int fix_size; |
| +}; |
| + |
| +struct minipool_fixup |
| +{ |
| + Mfix *next; |
| + rtx insn; |
| + HOST_WIDE_INT address; |
| + rtx *loc; |
| + enum machine_mode mode; |
| + int fix_size; |
| + rtx value; |
| + Mnode *minipool; |
| + HOST_WIDE_INT forwards; |
| + HOST_WIDE_INT backwards; |
| +}; |
| + |
| + |
| +/* Fixes less than a word need padding out to a word boundary. */ |
| +#define MINIPOOL_FIX_SIZE(mode, value) \ |
| + (IS_FORCE_MINIPOOL(value) ? 0 : \ |
| + (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)) |
| + |
| +#define IS_FORCE_MINIPOOL(x) \ |
| + (GET_CODE(x) == UNSPEC && \ |
| + XINT(x, 1) == UNSPEC_FORCE_MINIPOOL) |
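| + |
| +/* For example, with these definitions a QImode or HImode constant still |
| +   occupies 4 bytes in the pool, a DImode or DFmode constant occupies 8, |
| +   and a forced-minipool UNSPEC marker contributes 0 bytes. */ |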
| + |
| +static Mnode *minipool_vector_head; |
| +static Mnode *minipool_vector_tail; |
| + |
| +/* The linked list of all minipool fixes required for this function. */ |
| +Mfix *minipool_fix_head; |
| +Mfix *minipool_fix_tail; |
| +/* The fix entry for the current minipool, once it has been placed. */ |
| +Mfix *minipool_barrier; |
| + |
| +/* Determines if INSN is the start of a jump table. Returns the end |
| + of the TABLE or NULL_RTX. */ |
| +static rtx |
| +is_jump_table (rtx insn) |
| +{ |
| + rtx table; |
| + |
| + if (GET_CODE (insn) == JUMP_INSN |
| + && JUMP_LABEL (insn) != NULL |
| + && ((table = next_real_insn (JUMP_LABEL (insn))) |
| + == next_real_insn (insn)) |
| + && table != NULL |
| + && GET_CODE (table) == JUMP_INSN |
| + && (GET_CODE (PATTERN (table)) == ADDR_VEC |
| + || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC)) |
| + return table; |
| + |
| + return NULL_RTX; |
| +} |
| + |
| +static HOST_WIDE_INT |
| +get_jump_table_size (rtx insn) |
| +{ |
| +  /* ADDR_VECs only take room if read-only data goes into the text section. */ |
| + if (JUMP_TABLES_IN_TEXT_SECTION |
| +#if !defined(READONLY_DATA_SECTION_ASM_OP) |
| + || 1 |
| +#endif |
| + ) |
| + { |
| + rtx body = PATTERN (insn); |
| + int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0; |
| + |
| + return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt); |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +/* Move a minipool fix MP from its current location to before MAX_MP. |
| + If MAX_MP is NULL, then MP doesn't need moving, but the addressing |
| + constraints may need updating. */ |
| +static Mnode * |
| +move_minipool_fix_forward_ref (Mnode * mp, Mnode * max_mp, |
| + HOST_WIDE_INT max_address) |
| +{ |
| + /* This should never be true and the code below assumes these are |
| + different. */ |
| + if (mp == max_mp) |
| + abort (); |
| + |
| + if (max_mp == NULL) |
| + { |
| + if (max_address < mp->max_address) |
| + mp->max_address = max_address; |
| + } |
| + else |
| + { |
| + if (max_address > max_mp->max_address - mp->fix_size) |
| + mp->max_address = max_mp->max_address - mp->fix_size; |
| + else |
| + mp->max_address = max_address; |
| + |
| + /* Unlink MP from its current position. Since max_mp is non-null, |
| + mp->prev must be non-null. */ |
| + mp->prev->next = mp->next; |
| + if (mp->next != NULL) |
| + mp->next->prev = mp->prev; |
| + else |
| + minipool_vector_tail = mp->prev; |
| + |
| + /* Re-insert it before MAX_MP. */ |
| + mp->next = max_mp; |
| + mp->prev = max_mp->prev; |
| + max_mp->prev = mp; |
| + |
| + if (mp->prev != NULL) |
| + mp->prev->next = mp; |
| + else |
| + minipool_vector_head = mp; |
| + } |
| + |
| + /* Save the new entry. */ |
| + max_mp = mp; |
| + |
| + /* Scan over the preceding entries and adjust their addresses as required. |
| + */ |
| + while (mp->prev != NULL |
| + && mp->prev->max_address > mp->max_address - mp->prev->fix_size) |
| + { |
| + mp->prev->max_address = mp->max_address - mp->prev->fix_size; |
| + mp = mp->prev; |
| + } |
| + |
| + return max_mp; |
| +} |
| + |
| +/* Add a constant to the minipool for a forward reference. Returns the |
| + node added or NULL if the constant will not fit in this pool. */ |
| +static Mnode * |
| +add_minipool_forward_ref (Mfix * fix) |
| +{ |
| + /* If set, max_mp is the first pool_entry that has a lower constraint than |
| + the one we are trying to add. */ |
| + Mnode *max_mp = NULL; |
| + HOST_WIDE_INT max_address = fix->address + fix->forwards; |
| + Mnode *mp; |
| + |
| + /* If this fix's address is greater than the address of the first entry, |
| + then we can't put the fix in this pool. We subtract the size of the |
| + current fix to ensure that if the table is fully packed we still have |
| + enough room to insert this value by shuffling the other fixes forwards. */ |
| + if (minipool_vector_head && |
| + fix->address >= minipool_vector_head->max_address - fix->fix_size) |
| + return NULL; |
| + |
| + /* Scan the pool to see if a constant with the same value has already been |
| + added. While we are doing this, also note the location where we must |
| + insert the constant if it doesn't already exist. */ |
| + for (mp = minipool_vector_head; mp != NULL; mp = mp->next) |
| + { |
| + if (GET_CODE (fix->value) == GET_CODE (mp->value) |
| + && fix->mode == mp->mode |
| + && (GET_CODE (fix->value) != CODE_LABEL |
| + || (CODE_LABEL_NUMBER (fix->value) |
| + == CODE_LABEL_NUMBER (mp->value))) |
| + && rtx_equal_p (fix->value, mp->value)) |
| + { |
| + /* More than one fix references this entry. */ |
| + mp->refcount++; |
| + return move_minipool_fix_forward_ref (mp, max_mp, max_address); |
| + } |
| + |
| + /* Note the insertion point if necessary. */ |
| + if (max_mp == NULL && mp->max_address > max_address) |
| + max_mp = mp; |
| + |
| + } |
| + |
| + /* The value is not currently in the minipool, so we need to create a new |
| + entry for it. If MAX_MP is NULL, the entry will be put on the end of |
| + the list since the placement is less constrained than any existing |
| + entry. Otherwise, we insert the new fix before MAX_MP and, if |
| + necessary, adjust the constraints on the other entries. */ |
| + mp = xmalloc (sizeof (*mp)); |
| + mp->fix_size = fix->fix_size; |
| + mp->mode = fix->mode; |
| + mp->value = fix->value; |
| + mp->refcount = 1; |
| + /* Not yet required for a backwards ref. */ |
| + mp->min_address = -65536; |
| + |
| + if (max_mp == NULL) |
| + { |
| + mp->max_address = max_address; |
| + mp->next = NULL; |
| + mp->prev = minipool_vector_tail; |
| + |
| + if (mp->prev == NULL) |
| + { |
| + minipool_vector_head = mp; |
| + minipool_vector_label = gen_label_rtx (); |
| + } |
| + else |
| + mp->prev->next = mp; |
| + |
| + minipool_vector_tail = mp; |
| + } |
| + else |
| + { |
| + if (max_address > max_mp->max_address - mp->fix_size) |
| + mp->max_address = max_mp->max_address - mp->fix_size; |
| + else |
| + mp->max_address = max_address; |
| + |
| + mp->next = max_mp; |
| + mp->prev = max_mp->prev; |
| + max_mp->prev = mp; |
| + if (mp->prev != NULL) |
| + mp->prev->next = mp; |
| + else |
| + minipool_vector_head = mp; |
| + } |
| + |
| + /* Save the new entry. */ |
| + max_mp = mp; |
| + |
| + /* Scan over the preceding entries and adjust their addresses as required. */ |
| + while (mp->prev != NULL |
| + && mp->prev->max_address > mp->max_address - mp->prev->fix_size) |
| + { |
| + mp->prev->max_address = mp->max_address - mp->prev->fix_size; |
| + mp = mp->prev; |
| + } |
| + |
| + return max_mp; |
| +} |
| + |
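| +/* Move minipool fix MP backwards to sit after MIN_MP (or just tighten its |
| + minimum-address constraint if MIN_MP is NULL), then recompute the offsets |
| + and minimum-address constraints of the remaining pool entries. */ |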
| +static Mnode * |
| +move_minipool_fix_backward_ref (Mnode * mp, Mnode * min_mp, |
| + HOST_WIDE_INT min_address) |
| +{ |
| + HOST_WIDE_INT offset; |
| + |
| + /* This should never be true, and the code below assumes these are |
| + different. */ |
| + if (mp == min_mp) |
| + abort (); |
| + |
| + if (min_mp == NULL) |
| + { |
| + if (min_address > mp->min_address) |
| + mp->min_address = min_address; |
| + } |
| + else |
| + { |
| + /* We will adjust this below if it is too loose. */ |
| + mp->min_address = min_address; |
| + |
| + /* Unlink MP from its current position. Since min_mp is non-null, |
| + mp->next must be non-null. */ |
| + mp->next->prev = mp->prev; |
| + if (mp->prev != NULL) |
| + mp->prev->next = mp->next; |
| + else |
| + minipool_vector_head = mp->next; |
| + |
| + /* Reinsert it after MIN_MP. */ |
| + mp->prev = min_mp; |
| + mp->next = min_mp->next; |
| + min_mp->next = mp; |
| + if (mp->next != NULL) |
| + mp->next->prev = mp; |
| + else |
| + minipool_vector_tail = mp; |
| + } |
| + |
| + min_mp = mp; |
| + |
| + offset = 0; |
| + for (mp = minipool_vector_head; mp != NULL; mp = mp->next) |
| + { |
| + mp->offset = offset; |
| + if (mp->refcount > 0) |
| + offset += mp->fix_size; |
| + |
| + if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size) |
| + mp->next->min_address = mp->min_address + mp->fix_size; |
| + } |
| + |
| + return min_mp; |
| +} |
| + |
| +/* Add a constant to the minipool for a backward reference. Returns the |
| + node added or NULL if the constant will not fit in this pool. |
| + |
| + Note that the code for insertion for a backwards reference can be |
| + somewhat confusing because the calculated offsets for each fix do |
| + not take into account the size of the pool (which is still under |
| + construction). */ |
| +static Mnode * |
| +add_minipool_backward_ref (Mfix * fix) |
| +{ |
| + /* If set, min_mp is the last pool_entry that has a lower constraint than |
| + the one we are trying to add. */ |
| + Mnode *min_mp = NULL; |
| + /* This can be negative, since it is only a constraint. */ |
| + HOST_WIDE_INT min_address = fix->address - fix->backwards; |
| + Mnode *mp; |
| + |
| + /* If we can't reach the current pool from this insn, or if we can't insert |
| + this entry at the end of the pool without pushing other fixes out of |
| + range, then we don't try. This ensures that we can't fail later on. */ |
| + if (min_address >= minipool_barrier->address |
| + || (minipool_vector_tail->min_address + fix->fix_size |
| + >= minipool_barrier->address)) |
| + return NULL; |
| + |
| + /* Scan the pool to see if a constant with the same value has already been |
| + added. While we are doing this, also note the location where we must |
| + insert the constant if it doesn't already exist. */ |
| + for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev) |
| + { |
| + if (GET_CODE (fix->value) == GET_CODE (mp->value) |
| + && fix->mode == mp->mode |
| + && (GET_CODE (fix->value) != CODE_LABEL |
| + || (CODE_LABEL_NUMBER (fix->value) |
| + == CODE_LABEL_NUMBER (mp->value))) |
| + && rtx_equal_p (fix->value, mp->value) |
| + /* Check that there is enough slack to move this entry to the end |
| + of the table (this is conservative). */ |
| + && (mp->max_address |
| + > (minipool_barrier->address |
| + + minipool_vector_tail->offset |
| + + minipool_vector_tail->fix_size))) |
| + { |
| + mp->refcount++; |
| + return move_minipool_fix_backward_ref (mp, min_mp, min_address); |
| + } |
| + |
| + if (min_mp != NULL) |
| + mp->min_address += fix->fix_size; |
| + else |
| + { |
| + /* Note the insertion point if necessary. */ |
| + if (mp->min_address < min_address) |
| + { |
| + min_mp = mp; |
| + } |
| + else if (mp->max_address |
| + < minipool_barrier->address + mp->offset + fix->fix_size) |
| + { |
| + /* Inserting before this entry would push the fix beyond its |
| + maximum address (which can happen if we have re-located a |
| + forwards fix); force the new fix to come after it. */ |
| + min_mp = mp; |
| + min_address = mp->min_address + fix->fix_size; |
| + } |
| + } |
| + } |
| + |
| + /* We need to create a new entry. */ |
| + mp = xmalloc (sizeof (*mp)); |
| + mp->fix_size = fix->fix_size; |
| + mp->mode = fix->mode; |
| + mp->value = fix->value; |
| + mp->refcount = 1; |
| + mp->max_address = minipool_barrier->address + 65536; |
| + |
| + mp->min_address = min_address; |
| + |
| + if (min_mp == NULL) |
| + { |
| + mp->prev = NULL; |
| + mp->next = minipool_vector_head; |
| + |
| + if (mp->next == NULL) |
| + { |
| + minipool_vector_tail = mp; |
| + minipool_vector_label = gen_label_rtx (); |
| + } |
| + else |
| + mp->next->prev = mp; |
| + |
| + minipool_vector_head = mp; |
| + } |
| + else |
| + { |
| + mp->next = min_mp->next; |
| + mp->prev = min_mp; |
| + min_mp->next = mp; |
| + |
| + if (mp->next != NULL) |
| + mp->next->prev = mp; |
| + else |
| + minipool_vector_tail = mp; |
| + } |
| + |
| + /* Save the new entry. */ |
| + min_mp = mp; |
| + |
| + if (mp->prev) |
| + mp = mp->prev; |
| + else |
| + mp->offset = 0; |
| + |
| + /* Scan over the following entries and adjust their offsets. */ |
| + while (mp->next != NULL) |
| + { |
| + if (mp->next->min_address < mp->min_address + mp->fix_size) |
| + mp->next->min_address = mp->min_address + mp->fix_size; |
| + |
| + if (mp->refcount) |
| + mp->next->offset = mp->offset + mp->fix_size; |
| + else |
| + mp->next->offset = mp->offset; |
| + |
| + mp = mp->next; |
| + } |
| + |
| + return min_mp; |
| +} |
| + |
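| +/* Record BARRIER as the barrier for the current minipool and assign each |
| + pool entry its byte offset within the pool; unreferenced entries take no |
| + space. */ |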
| +static void |
| +assign_minipool_offsets (Mfix * barrier) |
| +{ |
| + HOST_WIDE_INT offset = 0; |
| + Mnode *mp; |
| + |
| + minipool_barrier = barrier; |
| + |
| + for (mp = minipool_vector_head; mp != NULL; mp = mp->next) |
| + { |
| + mp->offset = offset; |
| + |
| + if (mp->refcount > 0) |
| + offset += mp->fix_size; |
| + } |
| +} |
| + |
| +/* Print a symbolic form of X to the debug file, F. */ |
| +static void |
| +avr32_print_value (FILE * f, rtx x) |
| +{ |
| + switch (GET_CODE (x)) |
| + { |
| + case CONST_INT: |
| + fprintf (f, "0x%x", (int) INTVAL (x)); |
| + return; |
| + |
| + case CONST_DOUBLE: |
| + fprintf (f, "<0x%lx,0x%lx>", (long) XWINT (x, 2), (long) XWINT (x, 3)); |
| + return; |
| + |
| + case CONST_VECTOR: |
| + { |
| + int i; |
| + |
| + fprintf (f, "<"); |
| + for (i = 0; i < CONST_VECTOR_NUNITS (x); i++) |
| + { |
| + fprintf (f, "0x%x", (int) INTVAL (CONST_VECTOR_ELT (x, i))); |
| + if (i < (CONST_VECTOR_NUNITS (x) - 1)) |
| + fputc (',', f); |
| + } |
| + fprintf (f, ">"); |
| + } |
| + return; |
| + |
| + case CONST_STRING: |
| + fprintf (f, "\"%s\"", XSTR (x, 0)); |
| + return; |
| + |
| + case SYMBOL_REF: |
| + fprintf (f, "`%s'", XSTR (x, 0)); |
| + return; |
| + |
| + case LABEL_REF: |
| + fprintf (f, "L%d", INSN_UID (XEXP (x, 0))); |
| + return; |
| + |
| + case CONST: |
| + avr32_print_value (f, XEXP (x, 0)); |
| + return; |
| + |
| + case PLUS: |
| + avr32_print_value (f, XEXP (x, 0)); |
| + fprintf (f, "+"); |
| + avr32_print_value (f, XEXP (x, 1)); |
| + return; |
| + |
| + case PC: |
| + fprintf (f, "pc"); |
| + return; |
| + |
| + default: |
| + fprintf (f, "????"); |
| + return; |
| + } |
| +} |
| + |
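| +/* Return TRUE if LABEL is one of the minipool labels recorded for the |
| + current function. */ |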
| +int |
| +is_minipool_label (rtx label) |
| +{ |
| + minipool_labels *cur_mp_label = cfun->machine->minipool_label_head; |
| + |
| + if (GET_CODE (label) != CODE_LABEL) |
| + return FALSE; |
| + |
| + while (cur_mp_label) |
| + { |
| + if (CODE_LABEL_NUMBER (label) |
| + == CODE_LABEL_NUMBER (cur_mp_label->label)) |
| + return TRUE; |
| + cur_mp_label = cur_mp_label->next; |
| + } |
| + return FALSE; |
| +} |
| + |
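| +/* Append LABEL to the list of minipool labels kept in the per-function |
| + machine state. */ |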
| +static void |
| +new_minipool_label (rtx label) |
| +{ |
| + if (!cfun->machine->minipool_label_head) |
| + { |
| + cfun->machine->minipool_label_head = |
| + ggc_alloc (sizeof (minipool_labels)); |
| + cfun->machine->minipool_label_tail = cfun->machine->minipool_label_head; |
| + cfun->machine->minipool_label_head->label = label; |
| + cfun->machine->minipool_label_head->next = 0; |
| + cfun->machine->minipool_label_head->prev = 0; |
| + } |
| + else |
| + { |
| + cfun->machine->minipool_label_tail->next = |
| + ggc_alloc (sizeof (minipool_labels)); |
| + cfun->machine->minipool_label_tail->next->label = label; |
| + cfun->machine->minipool_label_tail->next->next = 0; |
| + cfun->machine->minipool_label_tail->next->prev = |
| + cfun->machine->minipool_label_tail; |
| + cfun->machine->minipool_label_tail = |
| + cfun->machine->minipool_label_tail->next; |
| + } |
| +} |
| + |
| +/* Output the literal table */ |
| +static void |
| +dump_minipool (rtx scan) |
| +{ |
| + Mnode *mp; |
| + Mnode *nmp; |
| + |
| + if (dump_file) |
| + fprintf (dump_file, |
| + ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n", |
| + INSN_UID (scan), (unsigned long) minipool_barrier->address, 4); |
| + |
| + scan = emit_insn_after (gen_consttable_start (), scan); |
| + scan = emit_insn_after (gen_align_4 (), scan); |
| + scan = emit_label_after (minipool_vector_label, scan); |
| + new_minipool_label (minipool_vector_label); |
| + |
| + for (mp = minipool_vector_head; mp != NULL; mp = nmp) |
| + { |
| + if (mp->refcount > 0) |
| + { |
| + if (dump_file) |
| + { |
| + fprintf (dump_file, |
| + ";; Offset %u, min %ld, max %ld ", |
| + (unsigned) mp->offset, (unsigned long) mp->min_address, |
| + (unsigned long) mp->max_address); |
| + avr32_print_value (dump_file, mp->value); |
| + fputc ('\n', dump_file); |
| + } |
| + |
| + switch (mp->fix_size) |
| + { |
| +#ifdef HAVE_consttable_4 |
| + case 4: |
| + scan = emit_insn_after (gen_consttable_4 (mp->value), scan); |
| + break; |
| + |
| +#endif |
| +#ifdef HAVE_consttable_8 |
| + case 8: |
| + scan = emit_insn_after (gen_consttable_8 (mp->value), scan); |
| + break; |
| + |
| +#endif |
| +#ifdef HAVE_consttable_16 |
| + case 16: |
| + scan = emit_insn_after (gen_consttable_16 (mp->value), scan); |
| + break; |
| + |
| +#endif |
| + case 0: |
| + /* This can happen for force-minipool entries which are just |
| + there to force the minipool to be generated. */ |
| + break; |
| + default: |
| + abort (); |
| + break; |
| + } |
| + } |
| + |
| + nmp = mp->next; |
| + free (mp); |
| + } |
| + |
| + minipool_vector_head = minipool_vector_tail = NULL; |
| + scan = emit_insn_after (gen_consttable_end (), scan); |
| + scan = emit_barrier_after (scan); |
| +} |
| + |
| +/* Return the cost of forcibly inserting a barrier after INSN. */ |
| +static int |
| +avr32_barrier_cost (rtx insn) |
| +{ |
| + /* Basing the location of the pool on the loop depth is preferable, but at |
| + the moment, the basic block information seems to be corrupted by this |
| + stage of the compilation. */ |
| + int base_cost = 50; |
| + rtx next = next_nonnote_insn (insn); |
| + |
| + if (next != NULL && GET_CODE (next) == CODE_LABEL) |
| + base_cost -= 20; |
| + |
| + switch (GET_CODE (insn)) |
| + { |
| + case CODE_LABEL: |
| + /* It will always be better to place the table before the label, rather |
| + than after it. */ |
| + return 50; |
| + |
| + case INSN: |
| + case CALL_INSN: |
| + return base_cost; |
| + |
| + case JUMP_INSN: |
| + return base_cost - 10; |
| + |
| + default: |
| + return base_cost + 10; |
| + } |
| +} |
| + |
| +/* Find the best place in the insn stream in the range |
| + (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier. |
| + Create the barrier by inserting a jump and add a new fix entry for |
| + it. */ |
| +static Mfix * |
| +create_fix_barrier (Mfix * fix, HOST_WIDE_INT max_address) |
| +{ |
| + HOST_WIDE_INT count = 0; |
| + rtx barrier; |
| + rtx from = fix->insn; |
| + rtx selected = from; |
| + int selected_cost; |
| + HOST_WIDE_INT selected_address; |
| + Mfix *new_fix; |
| + HOST_WIDE_INT max_count = max_address - fix->address; |
| + rtx label = gen_label_rtx (); |
| + |
| + selected_cost = avr32_barrier_cost (from); |
| + selected_address = fix->address; |
| + |
| + while (from && count < max_count) |
| + { |
| + rtx tmp; |
| + int new_cost; |
| + |
| + /* This code shouldn't have been called if there was a natural barrier |
| + within range. */ |
| + if (GET_CODE (from) == BARRIER) |
| + abort (); |
| + |
| + /* Count the length of this insn. */ |
| + count += get_attr_length (from); |
| + |
| + /* If there is a jump table, add its length. */ |
| + tmp = is_jump_table (from); |
| + if (tmp != NULL) |
| + { |
| + count += get_jump_table_size (tmp); |
| + |
| + /* Jump tables aren't in a basic block, so base the cost on the |
| + dispatch insn. If we select this location, we will still put |
| + the pool after the table. */ |
| + new_cost = avr32_barrier_cost (from); |
| + |
| + if (count < max_count && new_cost <= selected_cost) |
| + { |
| + selected = tmp; |
| + selected_cost = new_cost; |
| + selected_address = fix->address + count; |
| + } |
| + |
| + /* Continue after the dispatch table. */ |
| + from = NEXT_INSN (tmp); |
| + continue; |
| + } |
| + |
| + new_cost = avr32_barrier_cost (from); |
| + |
| + if (count < max_count && new_cost <= selected_cost) |
| + { |
| + selected = from; |
| + selected_cost = new_cost; |
| + selected_address = fix->address + count; |
| + } |
| + |
| + from = NEXT_INSN (from); |
| + } |
| + |
| + /* Create a new JUMP_INSN that branches around a barrier. */ |
| + from = emit_jump_insn_after (gen_jump (label), selected); |
| + JUMP_LABEL (from) = label; |
| + barrier = emit_barrier_after (from); |
| + emit_label_after (label, barrier); |
| + |
| + /* Create a minipool barrier entry for the new barrier. */ |
| + new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*new_fix)); |
| + new_fix->insn = barrier; |
| + new_fix->address = selected_address; |
| + new_fix->next = fix->next; |
| + fix->next = new_fix; |
| + |
| + return new_fix; |
| +} |
| + |
| +/* Record that there is a natural barrier in the insn stream at |
| + ADDRESS. */ |
| +static void |
| +push_minipool_barrier (rtx insn, HOST_WIDE_INT address) |
| +{ |
| + Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix)); |
| + |
| + fix->insn = insn; |
| + fix->address = address; |
| + |
| + fix->next = NULL; |
| + if (minipool_fix_head != NULL) |
| + minipool_fix_tail->next = fix; |
| + else |
| + minipool_fix_head = fix; |
| + |
| + minipool_fix_tail = fix; |
| +} |
| + |
| +/* Record INSN, which will need fixing up to load a value from the |
| + minipool. ADDRESS is the offset of the insn since the start of the |
| + function; LOC is a pointer to the part of the insn which requires |
| + fixing; VALUE is the constant that must be loaded, which is of type |
| + MODE. */ |
| +static void |
| +push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx * loc, |
| + enum machine_mode mode, rtx value) |
| +{ |
| + Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix)); |
| + rtx body = PATTERN (insn); |
| + |
| + fix->insn = insn; |
| + fix->address = address; |
| + fix->loc = loc; |
| + fix->mode = mode; |
| + fix->fix_size = MINIPOOL_FIX_SIZE (mode, value); |
| + fix->value = value; |
| + |
| + if (GET_CODE (body) == PARALLEL) |
| + { |
| + /* Mcall : Ks16 << 2 */ |
| + fix->forwards = ((1 << 15) - 1) << 2; |
| + fix->backwards = (1 << 15) << 2; |
| + } |
| + else if (GET_CODE (body) == SET |
| + && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 4) |
| + { |
| + /* Word Load */ |
| + if (TARGET_HARD_FLOAT |
| + && GET_MODE_CLASS (GET_MODE (SET_DEST (body))) == MODE_FLOAT) |
| + { |
| + /* Ldc0.w : Ku12 << 2 */ |
| + fix->forwards = ((1 << 12) - 1) << 2; |
| + fix->backwards = 0; |
| + } |
| + else |
| + { |
| + if (optimize_size) |
| + { |
| + /* Lddpc : Ku7 << 2 */ |
| + fix->forwards = ((1 << 7) - 1) << 2; |
| + fix->backwards = 0; |
| + } |
| + else |
| + { |
| + /* Ld.w : Ks16 */ |
| + fix->forwards = ((1 << 15) - 4); |
| + fix->backwards = (1 << 15); |
| + } |
| + } |
| + } |
| + else if (GET_CODE (body) == SET |
| + && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 8) |
| + { |
| + /* Double word load */ |
| + if (TARGET_HARD_FLOAT |
| + && GET_MODE_CLASS (GET_MODE (SET_DEST (body))) == MODE_FLOAT) |
| + { |
| + /* Ldc0.d : Ku12 << 2 */ |
| + fix->forwards = ((1 << 12) - 1) << 2; |
| + fix->backwards = 0; |
| + } |
| + else |
| + { |
| + /* Ld.d : Ks16 */ |
| + fix->forwards = ((1 << 15) - 4); |
| + fix->backwards = (1 << 15); |
| + } |
| + } |
| + else if (GET_CODE (body) == UNSPEC_VOLATILE |
| + && XINT (body, 1) == VUNSPEC_MVRC) |
| + { |
| + /* Coprocessor load */ |
| + /* Ldc : Ku8 << 2 */ |
| + fix->forwards = ((1 << 8) - 1) << 2; |
| + fix->backwards = 0; |
| + } |
| + else |
| + { |
| + /* Assume worst case which is lddpc insn. */ |
| + fix->forwards = ((1 << 7) - 1) << 2; |
| + fix->backwards = 0; |
| + } |
| + |
| + fix->minipool = NULL; |
| + |
| + /* If an insn doesn't have a range defined for it, then it isn't expecting |
| + to be reworked by this code. Better to abort now than to generate duff |
| + assembly code. */ |
| + if (fix->forwards == 0 && fix->backwards == 0) |
| + abort (); |
| + |
| + if (dump_file) |
| + { |
| + fprintf (dump_file, |
| + ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ", |
| + GET_MODE_NAME (mode), |
| + INSN_UID (insn), (unsigned long) address, |
| + -1 * (long) fix->backwards, (long) fix->forwards); |
| + avr32_print_value (dump_file, fix->value); |
| + fprintf (dump_file, "\n"); |
| + } |
| + |
| + /* Add it to the chain of fixes. */ |
| + fix->next = NULL; |
| + |
| + if (minipool_fix_head != NULL) |
| + minipool_fix_tail->next = fix; |
| + else |
| + minipool_fix_head = fix; |
| + |
| + minipool_fix_tail = fix; |
| +} |
| + |
| +/* Scan INSN and note any of its operands that need fixing. |
| + If DO_PUSHES is false we do not actually push any of the fixups |
| + needed. The function returns TRUE if any fixups were needed/pushed. |
| + This is used by avr32_memory_load_p() which needs to know about loads |
| + of constants that will be converted into minipool loads. */ |
| +static bool |
| +note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes) |
| +{ |
| + bool result = false; |
| + int opno; |
| + |
| + extract_insn (insn); |
| + |
| + if (!constrain_operands (1)) |
| + fatal_insn_not_found (insn); |
| + |
| + if (recog_data.n_alternatives == 0) |
| + return false; |
| + |
| + /* Fill in recog_op_alt with information about the constraints of this |
| + insn. */ |
| + preprocess_constraints (); |
| + |
| + for (opno = 0; opno < recog_data.n_operands; opno++) |
| + { |
| + rtx op; |
| + |
| + /* Things we need to fix can only occur in inputs. */ |
| + if (recog_data.operand_type[opno] != OP_IN) |
| + continue; |
| + |
| + op = recog_data.operand[opno]; |
| + |
| + if (avr32_const_pool_ref_operand (op, GET_MODE (op))) |
| + { |
| + if (do_pushes) |
| + { |
| + rtx cop = avoid_constant_pool_reference (op); |
| + |
| + /* Casting the address of something to a mode narrower than a |
| + word can cause avoid_constant_pool_reference() to return the |
| + pool reference itself. That's no good to us here. Let's |
| + just hope that we can use the constant pool value directly. |
| + */ |
| + if (op == cop) |
| + cop = get_pool_constant (XEXP (op, 0)); |
| + |
| + push_minipool_fix (insn, address, |
| + recog_data.operand_loc[opno], |
| + recog_data.operand_mode[opno], cop); |
| + } |
| + |
| + result = true; |
| + } |
| + else if (TARGET_HAS_ASM_ADDR_PSEUDOS |
| + && avr32_address_operand (op, GET_MODE (op))) |
| + { |
| + /* Handle pseudo instructions using a direct address. These pseudo |
| + instructions might need entries in the constant pool and we must |
| + therefore create a constant pool for them, in case the |
| + assembler/linker needs to insert entries. */ |
| + if (do_pushes) |
| + { |
| + /* Push a dummy constant pool entry so that the .cpool |
| + directive will be inserted at the appropriate place in the |
| + code even if there are no real constant pool entries. This |
| + is used by the assembler and linker to know where to put |
| + generated constant pool entries. */ |
| + push_minipool_fix (insn, address, |
| + recog_data.operand_loc[opno], |
| + recog_data.operand_mode[opno], |
| + gen_rtx_UNSPEC (VOIDmode, |
| + gen_rtvec (1, const0_rtx), |
| + UNSPEC_FORCE_MINIPOOL)); |
| + result = true; |
| + } |
| + } |
| + } |
| + return result; |
| +} |
| + |
| + |
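| +/* Return true if INSN is a simple register-to-register zero- or |
| + sign-extension (a "cast"). */ |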
| +static int |
| +avr32_insn_is_cast (rtx insn) |
| +{ |
| + |
| + if (NONJUMP_INSN_P (insn) |
| + && GET_CODE (PATTERN (insn)) == SET |
| + && (GET_CODE (SET_SRC (PATTERN (insn))) == ZERO_EXTEND |
| + || GET_CODE (SET_SRC (PATTERN (insn))) == SIGN_EXTEND) |
| + && REG_P (XEXP (SET_SRC (PATTERN (insn)), 0)) |
| + && REG_P (SET_DEST (PATTERN (insn)))) |
| + return true; |
| + return false; |
| +} |
| + |
| +/* Replace all occurrences of reg FROM with reg TO in X. */ |
| + |
| +rtx |
| +avr32_replace_reg (rtx x, rtx from, rtx to) |
| +{ |
| + int i, j; |
| + const char *fmt; |
| + |
| + gcc_assert ( REG_P (from) && REG_P (to) ); |
| + |
| + /* Allow this function to make replacements in EXPR_LISTs. */ |
| + if (x == 0) |
| + return 0; |
| + |
| + if (rtx_equal_p (x, from)) |
| + return to; |
| + |
| + if (GET_CODE (x) == SUBREG) |
| + { |
| + rtx new = avr32_replace_reg (SUBREG_REG (x), from, to); |
| + |
| + if (GET_CODE (new) == CONST_INT) |
| + { |
| + x = simplify_subreg (GET_MODE (x), new, |
| + GET_MODE (SUBREG_REG (x)), |
| + SUBREG_BYTE (x)); |
| + gcc_assert (x); |
| + } |
| + else |
| + SUBREG_REG (x) = new; |
| + |
| + return x; |
| + } |
| + else if (GET_CODE (x) == ZERO_EXTEND) |
| + { |
| + rtx new = avr32_replace_reg (XEXP (x, 0), from, to); |
| + |
| + if (GET_CODE (new) == CONST_INT) |
| + { |
| + x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x), |
| + new, GET_MODE (XEXP (x, 0))); |
| + gcc_assert (x); |
| + } |
| + else |
| + XEXP (x, 0) = new; |
| + |
| + return x; |
| + } |
| + |
| + fmt = GET_RTX_FORMAT (GET_CODE (x)); |
| + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) |
| + { |
| + if (fmt[i] == 'e') |
| + XEXP (x, i) = avr32_replace_reg (XEXP (x, i), from, to); |
| + else if (fmt[i] == 'E') |
| + for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
| + XVECEXP (x, i, j) = avr32_replace_reg (XVECEXP (x, i, j), from, to); |
| + } |
| + |
| + return x; |
| +} |
| + |
| + |
| +/* FIXME: The level of nesting in this function is way too deep. It needs to be |
| + torn apart. */ |
| +static void |
| +avr32_reorg_optimization (void) |
| +{ |
| + rtx first = get_first_nonnote_insn (); |
| + rtx insn; |
| + |
| + if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0))) |
| + { |
| + |
| + /* Scan through all insns looking for cast operations. */ |
| + if (dump_file) |
| + { |
| + fprintf (dump_file, ";; Deleting redundant cast operations:\n"); |
| + } |
| + for (insn = first; insn; insn = NEXT_INSN (insn)) |
| + { |
| + rtx reg, src_reg, scan; |
| + enum machine_mode mode; |
| + int unused_cast; |
| + rtx label_ref; |
| + |
| + if (avr32_insn_is_cast (insn) |
| + && (GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == QImode |
| + || GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == HImode)) |
| + { |
| + mode = GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)); |
| + reg = SET_DEST (PATTERN (insn)); |
| + src_reg = XEXP (SET_SRC (PATTERN (insn)), 0); |
| + } |
| + else |
| + { |
| + continue; |
| + } |
| + |
| + unused_cast = false; |
| + label_ref = NULL_RTX; |
| + for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan)) |
| + { |
| + /* Check if we have reached the destination of a simple |
| + conditional jump which we have already scanned past. If so, |
| + we can safely continue scanning. */ |
| + if (LABEL_P (scan) && label_ref != NULL_RTX) |
| + { |
| + if (CODE_LABEL_NUMBER (scan) == |
| + CODE_LABEL_NUMBER (XEXP (label_ref, 0))) |
| + label_ref = NULL_RTX; |
| + else |
| + break; |
| + } |
| + |
| + if (!INSN_P (scan)) |
| + continue; |
| + |
| + /* For conditional jumps we can manage to keep on scanning if |
| + we meet the destination label later on before any new jump |
| + insns occur. */ |
| + if (GET_CODE (scan) == JUMP_INSN) |
| + { |
| + if (any_condjump_p (scan) && label_ref == NULL_RTX) |
| + label_ref = condjump_label (scan); |
| + else |
| + break; |
| + } |
| + |
| + /* Check if we have a call and the register is used as an argument. */ |
| + if (CALL_P (scan) |
| + && find_reg_fusage (scan, USE, reg) ) |
| + break; |
| + |
| + if (!reg_mentioned_p (reg, PATTERN (scan))) |
| + continue; |
| + |
| + /* Check if casted register is used in this insn */ |
| + if ((regno_use_in (REGNO (reg), PATTERN (scan)) != NULL_RTX) |
| + && (GET_MODE (regno_use_in (REGNO (reg), PATTERN (scan))) == |
| + GET_MODE (reg))) |
| + { |
| + /* If not used in the source to the set or in a memory |
| + expression in the destination then the register is used |
| + as a destination and is really dead. */ |
| + if (single_set (scan) |
| + && GET_CODE (PATTERN (scan)) == SET |
| + && REG_P (SET_DEST (PATTERN (scan))) |
| + && !regno_use_in (REGNO (reg), SET_SRC (PATTERN (scan))) |
| + && label_ref == NULL_RTX) |
| + { |
| + unused_cast = true; |
| + } |
| + break; |
| + } |
| + |
| + /* Check if register is dead or set in this insn */ |
| + if (dead_or_set_p (scan, reg)) |
| + { |
| + unused_cast = true; |
| + break; |
| + } |
| + } |
| + |
| + /* Check if we have unresolved conditional jumps */ |
| + if (label_ref != NULL_RTX) |
| + continue; |
| + |
| + if (unused_cast) |
| + { |
| + if (REGNO (reg) == REGNO (XEXP (SET_SRC (PATTERN (insn)), 0))) |
| + { |
| + /* One operand cast, safe to delete */ |
| + if (dump_file) |
| + { |
| + fprintf (dump_file, |
| + ";; INSN %i removed, casted register %i value not used.\n", |
| + INSN_UID (insn), REGNO (reg)); |
| + } |
| + SET_INSN_DELETED (insn); |
| + /* Force the instruction to be recognized again */ |
| + INSN_CODE (insn) = -1; |
| + } |
| + else |
| + { |
| + /* Two operand cast, which really could be substituted with |
| + a move, if the source register is dead after the cast |
| + insn and then the insn which sets the source register |
| + could instead directly set the destination register for |
| + the cast. As long as there are no insns in between which |
| + use the register. */ |
| + rtx link = NULL_RTX; |
| + rtx set; |
| + rtx src_reg = XEXP (SET_SRC (PATTERN (insn)), 0); |
| + unused_cast = false; |
| + |
| + if (!find_reg_note (insn, REG_DEAD, src_reg)) |
| + continue; |
| + |
| + /* Search for the insn which sets the source register */ |
| + for (link = LOG_LINKS (insn); link; link = XEXP (link, 1)) |
| + { |
| + if (REG_NOTE_KIND (link) != 0) |
| + continue; |
| + set = single_set (XEXP (link, 0)); |
| + if (set && rtx_equal_p (src_reg, SET_DEST (set))) |
| + { |
| + link = XEXP (link, 0); |
| + break; |
| + } |
| + } |
| + |
| + /* No link found, or the link is a call insn where we cannot |
| + change the destination register. */ |
| + if (link == NULL_RTX || CALL_P (link)) |
| + continue; |
| + |
| + /* Scan through all insn between link and insn */ |
| + for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan)) |
| + { |
| + /* Don't try to trace forward past a CODE_LABEL if we |
| + haven't seen INSN yet. Ordinarily, we will only |
| + find the setting insn in LOG_LINKS if it is in the |
| + same basic block. However, cross-jumping can insert |
| + code labels in between the load and the call, and |
| + can result in situations where a single call insn |
| + may have two targets depending on where we came |
| + from. */ |
| + |
| + if (GET_CODE (scan) == CODE_LABEL) |
| + break; |
| + |
| + if (!INSN_P (scan)) |
| + continue; |
| + |
| + /* Don't try to trace forward past a JUMP. To optimize |
| + safely, we would have to check that all the |
| + instructions at the jump destination did not use REG. |
| + */ |
| + |
| + if (GET_CODE (scan) == JUMP_INSN) |
| + { |
| + break; |
| + } |
| + |
| + if (!reg_mentioned_p (src_reg, PATTERN (scan))) |
| + continue; |
| + |
| + /* We have reached the cast insn */ |
| + if (scan == insn) |
| + { |
| + /* We can remove cast and replace the destination |
| + register of the link insn with the destination |
| + of the cast */ |
| + if (dump_file) |
| + { |
| + fprintf (dump_file, |
| + ";; INSN %i removed, casted value unused. " |
| + "Destination of removed cast operation: register %i, folded into INSN %i.\n", |
| + INSN_UID (insn), REGNO (reg), |
| + INSN_UID (link)); |
| + } |
| + /* Update link insn */ |
| + SET_DEST (PATTERN (link)) = |
| + gen_rtx_REG (mode, REGNO (reg)); |
| + /* Force the instruction to be recognized again */ |
| + INSN_CODE (link) = -1; |
| + |
| + /* Delete insn */ |
| + SET_INSN_DELETED (insn); |
| + /* Force the instruction to be recognized again */ |
| + INSN_CODE (insn) = -1; |
| + break; |
| + } |
| + } |
| + } |
| + } |
| + } |
| + } |
| + |
| + if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0))) |
| + { |
| + |
| + /* Scan through all insns looking for shifted add operations */ |
| + if (dump_file) |
| + { |
| + fprintf (dump_file, |
| + ";; Deleting redundant shifted add operations:\n"); |
| + } |
| + for (insn = first; insn; insn = NEXT_INSN (insn)) |
| + { |
| + rtx reg, mem_expr, scan, op0, op1; |
| + int add_only_used_as_pointer; |
| + |
| + if (INSN_P (insn) |
| + && GET_CODE (PATTERN (insn)) == SET |
| + && GET_CODE (SET_SRC (PATTERN (insn))) == PLUS |
| + && (GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == MULT |
| + || GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == ASHIFT) |
| + && GET_CODE (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1)) == CONST_INT |
| + && REG_P (SET_DEST (PATTERN (insn))) |
| + && REG_P (XEXP (SET_SRC (PATTERN (insn)), 1)) |
| + && REG_P (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0))) |
| + { |
| + reg = SET_DEST (PATTERN (insn)); |
| + mem_expr = SET_SRC (PATTERN (insn)); |
| + op0 = XEXP (XEXP (mem_expr, 0), 0); |
| + op1 = XEXP (mem_expr, 1); |
| + } |
| + else |
| + { |
| + continue; |
| + } |
| + |
| + /* Scan forward to check if the result of the shifted add |
| + operation is only used as an address in memory operations and |
| + that the operands to the shifted add are not clobbered. */ |
| + add_only_used_as_pointer = false; |
| + for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan)) |
| + { |
| + if (!INSN_P (scan)) |
| + continue; |
| + |
| + /* Don't try to trace forward past a JUMP or CALL. To optimize |
| + safely, we would have to check that all the instructions at |
| + the jump destination did not use REG. */ |
| + |
| + if (GET_CODE (scan) == JUMP_INSN) |
| + { |
| + break; |
| + } |
| + |
| + /* If used in a call insn then we cannot optimize it away */ |
| + if (CALL_P (scan) && find_regno_fusage (scan, USE, REGNO (reg))) |
| + break; |
| + |
| + /* If any of the operands of the shifted add are clobbered we |
| + cannot optimize the shifted add away. */ |
| + if ((reg_set_p (op0, scan) && (REGNO (op0) != REGNO (reg))) |
| + || (reg_set_p (op1, scan) && (REGNO (op1) != REGNO (reg)))) |
| + break; |
| + |
| + if (!reg_mentioned_p (reg, PATTERN (scan))) |
| + continue; |
| + |
| + /* If it is used anywhere other than as a pointer or as the |
| + destination register, we fail. */ |
| + if (!(single_set (scan) |
| + && GET_CODE (PATTERN (scan)) == SET |
| + && ((MEM_P (SET_DEST (PATTERN (scan))) |
| + && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0)) |
| + && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) == REGNO (reg)) |
| + || (MEM_P (SET_SRC (PATTERN (scan))) |
| + && REG_P (XEXP (SET_SRC (PATTERN (scan)), 0)) |
| + && REGNO (XEXP |
| + (SET_SRC (PATTERN (scan)), 0)) == REGNO (reg)))) |
| + && !(GET_CODE (PATTERN (scan)) == SET |
| + && REG_P (SET_DEST (PATTERN (scan))) |
| + && !regno_use_in (REGNO (reg), |
| + SET_SRC (PATTERN (scan))))) |
| + break; |
| + |
| + /* We cannot replace the pointer in TImode insns |
| + as these have a different addressing mode than the other |
| + memory insns. */ |
| + if ( GET_MODE (SET_DEST (PATTERN (scan))) == TImode ) |
| + break; |
| + |
| + /* Check if register is dead or set in this insn */ |
| + if (dead_or_set_p (scan, reg)) |
| + { |
| + add_only_used_as_pointer = true; |
| + break; |
| + } |
| + } |
| + |
| + if (add_only_used_as_pointer) |
| + { |
| + /* Lets delete the add insn and replace all memory references |
| + which uses the pointer with the full expression. */ |
| + if (dump_file) |
| + { |
| + fprintf (dump_file, |
| + ";; Deleting INSN %i since address expression can be folded into all " |
| + "memory references using this expression\n", |
| + INSN_UID (insn)); |
| + } |
| + SET_INSN_DELETED (insn); |
| + /* Force the instruction to be recognized again */ |
| + INSN_CODE (insn) = -1; |
| + |
| + for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan)) |
| + { |
| + if (!INSN_P (scan)) |
| + continue; |
| + |
| + if (!reg_mentioned_p (reg, PATTERN (scan))) |
| + continue; |
| + |
| + /* If it is used anywhere other than as a pointer or as the |
| + destination register, we fail. */ |
| + if ((single_set (scan) |
| + && GET_CODE (PATTERN (scan)) == SET |
| + && ((MEM_P (SET_DEST (PATTERN (scan))) |
| + && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0)) |
| + && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) == REGNO (reg)) |
| + || (MEM_P (SET_SRC (PATTERN (scan))) |
| + && REG_P (XEXP (SET_SRC (PATTERN (scan)), 0)) |
| + && REGNO (XEXP (SET_SRC (PATTERN (scan)), 0)) == REGNO (reg))))) |
| + { |
| + if (dump_file) |
| + { |
| + fprintf (dump_file, |
| + ";; Register %i replaced by indexed address in INSN %i\n", |
| + REGNO (reg), INSN_UID (scan)); |
| + } |
| + if (MEM_P (SET_DEST (PATTERN (scan)))) |
| + XEXP (SET_DEST (PATTERN (scan)), 0) = mem_expr; |
| + else |
| + XEXP (SET_SRC (PATTERN (scan)), 0) = mem_expr; |
| + } |
| + |
| + /* Check if register is dead or set in this insn */ |
| + if (dead_or_set_p (scan, reg)) |
| + { |
| + break; |
| + } |
| + |
| + } |
| + } |
| + } |
| + } |
| + |
| + |
| + if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0))) |
| + { |
| + |
| + /* Scan through all insns looking for conditional register to |
| + register move operations */ |
| + if (dump_file) |
| + { |
| + fprintf (dump_file, |
| + ";; Folding redundant conditional move operations:\n"); |
| + } |
| + for (insn = first; insn; insn = next_nonnote_insn (insn)) |
| + { |
| + rtx src_reg, dst_reg, scan, test; |
| + |
| + if (INSN_P (insn) |
| + && GET_CODE (PATTERN (insn)) == COND_EXEC |
| + && GET_CODE (COND_EXEC_CODE (PATTERN (insn))) == SET |
| + && REG_P (SET_SRC (COND_EXEC_CODE (PATTERN (insn)))) |
| + && REG_P (SET_DEST (COND_EXEC_CODE (PATTERN (insn)))) |
| + && find_reg_note (insn, REG_DEAD, SET_SRC (COND_EXEC_CODE (PATTERN (insn))))) |
| + { |
| + src_reg = SET_SRC (COND_EXEC_CODE (PATTERN (insn))); |
| + dst_reg = SET_DEST (COND_EXEC_CODE (PATTERN (insn))); |
| + test = COND_EXEC_TEST (PATTERN (insn)); |
| + } |
| + else |
| + { |
| + continue; |
| + } |
| + |
| + /* Scan backward through the rest of insns in this if-then or if-else |
| + block and check if we can fold the move into another of the conditional |
| + insns in the same block. */ |
| + scan = prev_nonnote_insn (insn); |
| + while (INSN_P (scan) |
| + && GET_CODE (PATTERN (scan)) == COND_EXEC |
| + && rtx_equal_p (COND_EXEC_TEST (PATTERN (scan)), test)) |
| + { |
| + rtx pattern = COND_EXEC_CODE (PATTERN (scan)); |
| + if ( GET_CODE (pattern) == PARALLEL ) |
| + pattern = XVECEXP (pattern, 0, 0); |
| + |
| + if ( reg_set_p (src_reg, pattern) ) |
| + { |
| + /* Fold in the destination register for the cond. move |
| + into this insn. */ |
| + SET_DEST (pattern) = dst_reg; |
| + if (dump_file) |
| + { |
| + fprintf (dump_file, |
| + ";; Deleting INSN %i since this operation can be folded into INSN %i\n", |
| + INSN_UID (insn), INSN_UID (scan)); |
| + } |
| + |
| + /* Scan and check if any of the insns in between uses the src_reg. We |
| + must then replace it with the dst_reg. */ |
| + while ( (scan = next_nonnote_insn (scan)) != insn ){ |
| + avr32_replace_reg (scan, src_reg, dst_reg); |
| + } |
| + /* Delete the insn. */ |
| + SET_INSN_DELETED (insn); |
| + |
| + /* Force the instruction to be recognized again */ |
| + INSN_CODE (insn) = -1; |
| + break; |
| + } |
| + |
| + /* If the destination register is used but not set in this insn |
| + we cannot fold. */ |
| + if ( reg_mentioned_p (dst_reg, pattern) ) |
| + break; |
| + |
| + scan = prev_nonnote_insn (scan); |
| + } |
| + } |
| + } |
| + |
| +} |
| + |
| +/* Exported to toplev.c. |
| + |
| + Do a final pass over the function, just before delayed branch |
| + scheduling. */ |
| + |
| +static void |
| +avr32_reorg (void) |
| +{ |
| + rtx insn; |
| + HOST_WIDE_INT address = 0; |
| + Mfix *fix; |
| + |
| + minipool_fix_head = minipool_fix_tail = NULL; |
| + |
| + /* The first insn must always be a note, or the code below won't scan it |
| + properly. */ |
| + insn = get_insns (); |
| + if (GET_CODE (insn) != NOTE) |
| + abort (); |
| + |
| + /* Scan all the insns and record the operands that will need fixing. */ |
| + for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn)) |
| + { |
| + if (GET_CODE (insn) == BARRIER) |
| + push_minipool_barrier (insn, address); |
| + else if (INSN_P (insn)) |
| + { |
| + rtx table; |
| + |
| + note_invalid_constants (insn, address, true); |
| + address += get_attr_length (insn); |
| + |
| + /* If the insn is a vector jump, add the size of the table and skip |
| + the table. */ |
| + if ((table = is_jump_table (insn)) != NULL) |
| + { |
| + address += get_jump_table_size (table); |
| + insn = table; |
| + } |
| + } |
| + } |
| + |
| + fix = minipool_fix_head; |
| + |
| + /* Now scan the fixups and perform the required changes. */ |
| + while (fix) |
| + { |
| + Mfix *ftmp; |
| + Mfix *fdel; |
| + Mfix *last_added_fix; |
| + Mfix *last_barrier = NULL; |
| + Mfix *this_fix; |
| + |
| + /* Skip any further barriers before the next fix. */ |
| + while (fix && GET_CODE (fix->insn) == BARRIER) |
| + fix = fix->next; |
| + |
| + /* No more fixes. */ |
| + if (fix == NULL) |
| + break; |
| + |
| + last_added_fix = NULL; |
| + |
| + for (ftmp = fix; ftmp; ftmp = ftmp->next) |
| + { |
| + if (GET_CODE (ftmp->insn) == BARRIER) |
| + { |
| + if (ftmp->address >= minipool_vector_head->max_address) |
| + break; |
| + |
| + last_barrier = ftmp; |
| + } |
| + else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL) |
| + break; |
| + |
| + last_added_fix = ftmp; /* Keep track of the last fix added. */ |
| + } |
| + |
| + /* If we found a barrier, drop back to that; any fixes that we could |
| + have reached but come after the barrier will now go in the next |
| + mini-pool. */ |
| + if (last_barrier != NULL) |
| + { |
| + /* Reduce the refcount for those fixes that won't go into this pool |
| + after all. */ |
| + for (fdel = last_barrier->next; |
| + fdel && fdel != ftmp; fdel = fdel->next) |
| + { |
| + fdel->minipool->refcount--; |
| + fdel->minipool = NULL; |
| + } |
| + |
| + ftmp = last_barrier; |
| + } |
| + else |
| + { |
| + /* ftmp is the first fix that we can't fit into this pool and there are no |
| + natural barriers that we could use. Insert a new barrier in the |
| + code somewhere between the previous fix and this one, and |
| + arrange to jump around it. */ |
| + HOST_WIDE_INT max_address; |
| + |
| + /* The last item on the list of fixes must be a barrier, so we can |
| + never run off the end of the list of fixes without last_barrier |
| + being set. */ |
| + if (ftmp == NULL) |
| + abort (); |
| + |
| + max_address = minipool_vector_head->max_address; |
| + /* Check that there isn't another fix that is in range that we |
| + couldn't fit into this pool because the pool was already too |
| + large: we need to put the pool before such an instruction. */ |
| + if (ftmp->address < max_address) |
| + max_address = ftmp->address; |
| + |
| + last_barrier = create_fix_barrier (last_added_fix, max_address); |
| + } |
| + |
| + assign_minipool_offsets (last_barrier); |
| + |
| + while (ftmp) |
| + { |
| + if (GET_CODE (ftmp->insn) != BARRIER |
| + && ((ftmp->minipool = add_minipool_backward_ref (ftmp)) |
| + == NULL)) |
| + break; |
| + |
| + ftmp = ftmp->next; |
| + } |
| + |
| + /* Scan over the fixes we have identified for this pool, fixing them up |
| + and adding the constants to the pool itself. */ |
| + for (this_fix = fix; this_fix && ftmp != this_fix; |
| + this_fix = this_fix->next) |
| + if (GET_CODE (this_fix->insn) != BARRIER |
| + /* Do nothing for entries present just to force the insertion of |
| + a minipool. */ |
| + && !IS_FORCE_MINIPOOL (this_fix->value)) |
| + { |
| + rtx addr = plus_constant (gen_rtx_LABEL_REF (VOIDmode, |
| + minipool_vector_label), |
| + this_fix->minipool->offset); |
| + *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr); |
| + } |
| + |
| + dump_minipool (last_barrier->insn); |
| + fix = ftmp; |
| + } |
| + |
| + /* Free the minipool memory. */ |
| + obstack_free (&minipool_obstack, minipool_startobj); |
| + |
| + avr32_reorg_optimization (); |
| +} |
| + |
| + |
| +/* Hook for doing some final scanning of instructions. Does nothing yet. */ |
| +void |
| +avr32_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED, |
| + rtx * opvec ATTRIBUTE_UNUSED, |
| + int noperands ATTRIBUTE_UNUSED) |
| +{ |
| + return; |
| +} |
| + |
| + |
| +/* Function for changing the condition on the next instruction; |
| + should be used when emitting compare instructions and |
| + the condition of the next instruction needs to change. */ |
| +int |
| +set_next_insn_cond (rtx cur_insn, rtx new_cond) |
| +{ |
| + rtx next_insn = next_nonnote_insn (cur_insn); |
| + if ((next_insn != NULL_RTX) |
| + && (INSN_P (next_insn))) |
| + { |
| + if ((GET_CODE (PATTERN (next_insn)) == SET) |
| + && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE)) |
| + { |
| + /* Branch instructions */ |
| + XEXP (SET_SRC (PATTERN (next_insn)), 0) = new_cond; |
| + /* Force the instruction to be recognized again */ |
| + INSN_CODE (next_insn) = -1; |
| + return TRUE; |
| + } |
| + else if ((GET_CODE (PATTERN (next_insn)) == SET) |
| + && avr32_comparison_operator (SET_SRC (PATTERN (next_insn)), |
| + GET_MODE (SET_SRC (PATTERN (next_insn))))) |
| + { |
| + /* scc with no compare */ |
| + SET_SRC (PATTERN (next_insn)) = new_cond; |
| + /* Force the instruction to be recognized again */ |
| + INSN_CODE (next_insn) = -1; |
| + return TRUE; |
| + } |
| + else if (GET_CODE (PATTERN (next_insn)) == COND_EXEC) |
| + { |
| + if ( GET_CODE (new_cond) == UNSPEC ) |
| + { |
| + COND_EXEC_TEST (PATTERN (next_insn)) = |
| + gen_rtx_UNSPEC (CCmode, |
| + gen_rtvec (2, |
| + XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 0), |
| + XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 1)), |
| + XINT (new_cond, 1)); |
| + } |
| + else |
| + { |
| + PUT_CODE(COND_EXEC_TEST (PATTERN (next_insn)), GET_CODE(new_cond)); |
| + } |
| + } |
| + } |
| + |
| + return FALSE; |
| +} |
| + |
| +/* Function for obtaining the condition for the next instruction |
| + after cur_insn. */ |
| +rtx |
| +get_next_insn_cond (rtx cur_insn) |
| +{ |
| + rtx next_insn = next_nonnote_insn (cur_insn); |
| + rtx cond = NULL_RTX; |
| + if (next_insn != NULL_RTX |
| + && INSN_P (next_insn)) |
| + { |
| + if ((GET_CODE (PATTERN (next_insn)) == SET) |
| + && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE)) |
| + { |
| + /* Branch and cond if then else instructions */ |
| + cond = XEXP (SET_SRC (PATTERN (next_insn)), 0); |
| + } |
| + else if ((GET_CODE (PATTERN (next_insn)) == SET) |
| + && avr32_comparison_operator (SET_SRC (PATTERN (next_insn)), |
| + GET_MODE (SET_SRC (PATTERN (next_insn))))) |
| + { |
| + /* scc with no compare */ |
| + cond = SET_SRC (PATTERN (next_insn)); |
| + } |
| + else if (GET_CODE (PATTERN (next_insn)) == COND_EXEC) |
| + { |
| + cond = COND_EXEC_TEST (PATTERN (next_insn)); |
| + } |
| + } |
| + return cond; |
| +} |
| + |
| + |
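| +/* Output the compare (or bit-load) instruction needed to test OP0 against |
| + OP1 in MODE for condition COND. If the compare is found to be redundant, |
| + no instruction is output. Returns the condition the following conditional |
| + instruction should use. */ |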
| +rtx |
| +avr32_output_cmp (rtx cond, enum machine_mode mode, rtx op0, rtx op1) |
| +{ |
| + |
| + rtx new_cond = NULL_RTX; |
| + rtx ops[2]; |
| + rtx compare_pattern; |
| + ops[0] = op0; |
| + ops[1] = op1; |
| + |
| + if ( GET_CODE (op0) == AND ) |
| + compare_pattern = op0; |
| + else |
| + compare_pattern = gen_rtx_COMPARE (mode, op0, op1); |
| + |
| + new_cond = is_compare_redundant (compare_pattern, cond); |
| + |
| + if (new_cond != NULL_RTX) |
| + return new_cond; |
| + |
| + /* Check if we are inserting a bit-load instead of a compare. */ |
| + if ( GET_CODE (op0) == AND ) |
| + { |
| + ops[0] = XEXP (op0, 0); |
| + ops[1] = XEXP (op0, 1); |
| + output_asm_insn ("bld\t%0, %p1", ops); |
| + return cond; |
| + } |
| + |
| + /* Insert compare */ |
| + switch (mode) |
| + { |
| + case QImode: |
| + output_asm_insn ("cp.b\t%0, %1", ops); |
| + break; |
| + case HImode: |
| + output_asm_insn ("cp.h\t%0, %1", ops); |
| + break; |
| + case SImode: |
| + output_asm_insn ("cp.w\t%0, %1", ops); |
| + break; |
| + case DImode: |
| + if (GET_CODE (op1) != REG) |
| + output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0", ops); |
| + else |
| + output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0, %m1", ops); |
| + break; |
| + default: |
| + internal_error ("Unknown comparison mode"); |
| + break; |
| + } |
| + |
| + return cond; |
| +} |
| + |
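| +/* Predicate for the load-multiple patterns: return 1 if OP is a PARALLEL of |
| + SETs loading SImode registers (the memory accesses are wrapped in UNSPECs), |
| + optionally with a write-back of the base register as the first element. */ |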
| +int |
| +avr32_load_multiple_operation (rtx op, |
| + enum machine_mode mode ATTRIBUTE_UNUSED) |
| +{ |
| + int count = XVECLEN (op, 0); |
| + unsigned int dest_regno; |
| + rtx src_addr; |
| + rtx elt; |
| + int i = 1, base = 0; |
| + |
| + if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET) |
| + return 0; |
| + |
| + /* Check to see if this might be a write-back. */ |
| + if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS) |
| + { |
| + i++; |
| + base = 1; |
| + |
| + /* Now check it more carefully. */ |
| + if (GET_CODE (SET_DEST (elt)) != REG |
| + || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG |
| + || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT |
| + || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4) |
| + return 0; |
| + } |
| + |
| + /* Perform a quick check so we don't blow up below. */ |
| + if (count <= 1 |
| + || GET_CODE (XVECEXP (op, 0, i - 1)) != SET |
| + || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG |
| + || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC) |
| + return 0; |
| + |
| + dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1))); |
| + src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0); |
| + |
| + for (; i < count; i++) |
| + { |
| + elt = XVECEXP (op, 0, i); |
| + |
| + if (GET_CODE (elt) != SET |
| + || GET_CODE (SET_DEST (elt)) != REG |
| + || GET_MODE (SET_DEST (elt)) != SImode |
| + || GET_CODE (SET_SRC (elt)) != UNSPEC) |
| + return 0; |
| + } |
| + |
| + return 1; |
| +} |
| + |
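| +/* Predicate for the store-multiple patterns: return 1 if OP is a PARALLEL of |
| + SETs storing to SImode memory locations, with the stored values wrapped in |
| + UNSPECs. */ |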
| +int |
| +avr32_store_multiple_operation (rtx op, |
| + enum machine_mode mode ATTRIBUTE_UNUSED) |
| +{ |
| + int count = XVECLEN (op, 0); |
| + int src_regno; |
| + rtx dest_addr; |
| + rtx elt; |
| + int i = 1; |
| + |
| + if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET) |
| + return 0; |
| + |
| + /* Perform a quick check so we don't blow up below. */ |
| + if (count <= i |
| + || GET_CODE (XVECEXP (op, 0, i - 1)) != SET |
| + || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM |
| + || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC) |
| + return 0; |
| + |
| + src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1))); |
| + dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0); |
| + |
| + for (; i < count; i++) |
| + { |
| + elt = XVECEXP (op, 0, i); |
| + |
| + if (GET_CODE (elt) != SET |
| + || GET_CODE (SET_DEST (elt)) != MEM |
| + || GET_MODE (SET_DEST (elt)) != SImode |
| + || GET_CODE (SET_SRC (elt)) != UNSPEC) |
| + return 0; |
| + } |
| + |
| + return 1; |
| +} |
| + |
| +int |
| +avr32_valid_macmac_bypass (rtx insn_out, rtx insn_in) |
| +{ |
| + /* Check if they use the same accumulator */ |
| + if (rtx_equal_p |
| + (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in)))) |
| + { |
| + return TRUE; |
| + } |
| + |
| + return FALSE; |
| +} |
| + |
| +int |
| +avr32_valid_mulmac_bypass (rtx insn_out, rtx insn_in) |
| +{ |
| + /* Check if the mul instruction produces the accumulator for the mac |
| + instruction. */ |
| + if (rtx_equal_p |
| + (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in)))) |
| + { |
| + return TRUE; |
| + } |
| + return FALSE; |
| +} |
| + |
| +int |
| +avr32_store_bypass (rtx insn_out, rtx insn_in) |
| +{ |
| + /* Only valid bypass if the output result is used as an src in the store |
| + instruction, NOT if used as a pointer or base. */ |
| + if (rtx_equal_p |
| + (SET_DEST (PATTERN (insn_out)), SET_SRC (PATTERN (insn_in)))) |
| + { |
| + return TRUE; |
| + } |
| + |
| + return FALSE; |
| +} |
| + |
| +int |
| +avr32_mul_waw_bypass (rtx insn_out, rtx insn_in) |
| +{ |
| + /* Check if the register holding the result from the mul instruction is |
| + used as a result register in the input instruction. */ |
| + if (rtx_equal_p |
| + (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in)))) |
| + { |
| + return TRUE; |
| + } |
| + |
| + return FALSE; |
| +} |
| + |
| +int |
| +avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in) |
| +{ |
| + /* Check if the first loaded word in insn_out is used in insn_in. */ |
| + rtx dst_reg; |
| + rtx second_loaded_reg; |
| + |
| + /* If this is a double alu operation then the bypass is not valid */ |
| + if ((get_attr_type (insn_in) == TYPE_ALU |
| + || get_attr_type (insn_in) == TYPE_ALU2) |
| + && (GET_MODE_SIZE (GET_MODE (SET_DEST (PATTERN (insn_out)))) > 4)) |
| + return FALSE; |
| + |
| + /* Get the destination register in the load */ |
| + if (!REG_P (SET_DEST (PATTERN (insn_out)))) |
| + return FALSE; |
| + |
| + dst_reg = SET_DEST (PATTERN (insn_out)); |
| + second_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 1); |
| + |
| + if (!reg_mentioned_p (second_loaded_reg, PATTERN (insn_in))) |
| + return TRUE; |
| + |
| + return FALSE; |
| +} |
| + |
| + |
| +int |
| +avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in) |
| +{ |
| + /* Check if the first two loaded words in insn_out are used in insn_in. */ |
| + rtx dst_reg; |
| + rtx third_loaded_reg, fourth_loaded_reg; |
| + |
| + /* Get the destination register in the load */ |
| + if (!REG_P (SET_DEST (PATTERN (insn_out)))) |
| + return FALSE; |
| + |
| + dst_reg = SET_DEST (PATTERN (insn_out)); |
| + third_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 2); |
| + fourth_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 3); |
| + |
| + if (!reg_mentioned_p (third_loaded_reg, PATTERN (insn_in)) |
| + && !reg_mentioned_p (fourth_loaded_reg, PATTERN (insn_in))) |
| + { |
| + return TRUE; |
| + } |
| + |
| + return FALSE; |
| +} |
| + |
| + |
| + |
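| +/* Used by if-conversion: rewrite TEST, which refers to cc0, into an |
| + equivalent comparison on the operands of the compare insn that ends |
| + CE_INFO->test_bb, so the test can be used directly in a COND_EXEC. */ |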
| +rtx |
| +avr32_ifcvt_modify_test (ce_if_block_t *ce_info, rtx test) |
| +{ |
| + rtx branch_insn; |
| + rtx cmp_test; |
| + rtx compare_op0; |
| + rtx compare_op1; |
| + |
| + |
| + if ( !ce_info |
| + || test == NULL_RTX |
| + || !reg_mentioned_p (cc0_rtx, test)) |
| + return test; |
| + |
| + branch_insn = BB_END (ce_info->test_bb); |
| + cmp_test = PATTERN(prev_nonnote_insn (branch_insn)); |
| + |
| + if (GET_CODE(cmp_test) != SET |
| + || !CC0_P(XEXP(cmp_test, 0)) ) |
| + return cmp_test; |
| + |
| + if ( GET_CODE(SET_SRC(cmp_test)) == COMPARE ){ |
| + compare_op0 = XEXP(SET_SRC(cmp_test), 0); |
| + compare_op1 = XEXP(SET_SRC(cmp_test), 1); |
| + } else { |
| + compare_op0 = SET_SRC(cmp_test); |
| + compare_op1 = const0_rtx; |
| + } |
| + |
| + return gen_rtx_fmt_ee (GET_CODE(test), GET_MODE (compare_op0), |
| + compare_op0, compare_op1); |
| +} |
| + |
| + |
| + |
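| +/* Used by if-conversion: try to make PATTERN (a COND_EXEC wrapping the |
| + operation of INSN) acceptable for conditional execution, either by adding |
| + a scratch-register clobber or by moving the operation out of the |
| + if-then-else and leaving only a conditional move behind. Returns the |
| + pattern to use for the conditional insn, or NULL_RTX if the insn cannot |
| + be handled. */ |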
| +rtx |
| +avr32_ifcvt_modify_insn (ce_if_block_t *ce_info, |
| + rtx pattern, |
| + rtx insn, |
| + int *num_true_changes) |
| +{ |
| + rtx test = COND_EXEC_TEST(pattern); |
| + rtx op = COND_EXEC_CODE(pattern); |
| + rtx cmp_insn; |
| + rtx cond_exec_insn; |
| + int inputs_set_outside_ifblock = 1; |
| + basic_block current_bb = BLOCK_FOR_INSN (insn); |
| + rtx bb_insn ; |
| + enum machine_mode mode = GET_MODE (XEXP (op, 0)); |
| + |
| + if (CC0_P(XEXP(test, 0))) |
| + test = avr32_ifcvt_modify_test (ce_info, |
| + test ); |
| + |
| + pattern = gen_rtx_COND_EXEC (VOIDmode, test, op); |
| + |
| + if ( !reload_completed ) |
| + { |
| + rtx start; |
| + int num_insns; |
| + int max_insns = MAX_CONDITIONAL_EXECUTE; |
| + |
| + if ( !ce_info ) |
| + return op; |
| + |
| + /* Check if the insn is not suitable for conditional |
| + execution. */ |
| + start_sequence (); |
| + cond_exec_insn = emit_insn (pattern); |
| + if ( recog_memoized (cond_exec_insn) < 0 |
| + && !no_new_pseudos ) |
| + { |
| + /* Insn is not suitable for conditional execution, try |
| + to fix it up by using an extra scratch register or |
| + by pulling the operation outside the if-then-else |
| + and then emitting a conditional move inside the if-then-else. */ |
| + end_sequence (); |
| + if ( GET_CODE (op) != SET |
| + || !REG_P (SET_DEST (op)) |
| + || GET_CODE (SET_SRC (op)) == IF_THEN_ELSE |
| + || GET_MODE_SIZE (mode) > UNITS_PER_WORD ) |
| + return NULL_RTX; |
| + |
| + /* Check if any of the input operands to the insn is set inside the |
| + current block. */ |
| + if ( current_bb->index == ce_info->then_bb->index ) |
| + start = PREV_INSN (BB_HEAD (ce_info->then_bb)); |
| + else |
| + start = PREV_INSN (BB_HEAD (ce_info->else_bb)); |
| + |
| + |
| + for ( bb_insn = next_nonnote_insn (start); bb_insn != insn; bb_insn = next_nonnote_insn (bb_insn) ) |
| + { |
| + rtx set = single_set (bb_insn); |
| + |
| + if ( set && reg_mentioned_p (SET_DEST (set), SET_SRC (op))) |
| + { |
| + inputs_set_outside_ifblock = 0; |
| + break; |
| + } |
| + } |
| + |
| + cmp_insn = prev_nonnote_insn (BB_END (ce_info->test_bb)); |
| + |
| + |
| + /* Check if we can insert more insns. */ |
| + num_insns = ( ce_info->num_then_insns + |
| + ce_info->num_else_insns + |
| + ce_info->num_cond_clobber_insns + |
| + ce_info->num_extra_move_insns ); |
| + |
| + if ( ce_info->num_else_insns != 0 ) |
| + max_insns *=2; |
| + |
| + if ( num_insns >= max_insns ) |
| + return NULL_RTX; |
| + |
| + /* Check if we have an instruction which might be converted to |
| + conditional form if we give it a scratch register to clobber. */ |
| + { |
| + rtx clobber_insn; |
| + rtx scratch_reg = gen_reg_rtx (mode); |
| + rtx new_pattern = copy_rtx (pattern); |
| + rtx set_src = SET_SRC (COND_EXEC_CODE (new_pattern)); |
| + |
| + rtx clobber = gen_rtx_CLOBBER (mode, scratch_reg); |
| + rtx vec[2] = { COND_EXEC_CODE (new_pattern), clobber }; |
| + COND_EXEC_CODE (new_pattern) = gen_rtx_PARALLEL (mode, gen_rtvec_v (2, vec)); |
| + |
| + start_sequence (); |
| + clobber_insn = emit_insn (new_pattern); |
| + |
| + if ( recog_memoized (clobber_insn) >= 0 |
| + && ( ( GET_RTX_LENGTH (GET_CODE (set_src)) == 2 |
| + && CONST_INT_P (XEXP (set_src, 1)) |
| + && avr32_const_ok_for_constraint_p (INTVAL (XEXP (set_src, 1)), 'K', "Ks08") ) |
| + || !ce_info->else_bb |
| + || current_bb->index == ce_info->else_bb->index )) |
| + { |
| + end_sequence (); |
| + /* Force the insn to be recognized again. */ |
| + INSN_CODE (insn) = -1; |
| + |
| + /* If this is the first change in this IF-block then |
| + signal that we have made a change. */ |
| + if ( ce_info->num_cond_clobber_insns == 0 |
| + && ce_info->num_extra_move_insns == 0 ) |
| + *num_true_changes += 1; |
| + |
| + ce_info->num_cond_clobber_insns++; |
| + |
| + if (dump_file) |
| + fprintf (dump_file, |
| + "\nReplacing INSN %d with an insn using a scratch register for later ifcvt passes...\n", |
| + INSN_UID (insn)); |
| + |
| + return COND_EXEC_CODE (new_pattern); |
| + } |
| + end_sequence (); |
| + } |
| + |
| + if ( inputs_set_outside_ifblock ) |
| + { |
| +        /* Check if the insn before the cmp is an 'and' which, used |
| +           together with the cmp, can be optimized into a bld. If |
| +           so then we should try to put the insn before the 'and' |
| +           so that we can catch the bld peephole. */ |
| + rtx set; |
| + rtx insn_before_cmp_insn = prev_nonnote_insn (cmp_insn); |
| + if (insn_before_cmp_insn |
| + && (set = single_set (insn_before_cmp_insn)) |
| + && GET_CODE (SET_SRC (set)) == AND |
| + && one_bit_set_operand (XEXP (SET_SRC (set), 1), SImode) |
| + /* Also make sure that the insn does not set any |
| + of the input operands to the insn we are pulling out. */ |
| + && !reg_mentioned_p (SET_DEST (set), SET_SRC (op)) ) |
| + cmp_insn = prev_nonnote_insn (cmp_insn); |
| + |
| + /* We can try to put the operation outside the if-then-else |
| + blocks and insert a move. */ |
| + if ( !insn_invalid_p (insn) |
| + /* Do not allow conditional insns to be moved outside the |
| + if-then-else. */ |
| + && !reg_mentioned_p (cc0_rtx, insn) |
| + /* We cannot move memory loads outside of the if-then-else |
| +                since the memory access should not be performed if the |
| + condition is not met. */ |
| + && !mem_mentioned_p (SET_SRC (op)) ) |
| + { |
| + rtx scratch_reg = gen_reg_rtx (mode); |
| + rtx op_pattern = copy_rtx (op); |
| + rtx new_insn, seq; |
| + rtx link, prev_link; |
| + op = copy_rtx (op); |
| + /* Emit the operation to a temp reg before the compare, |
| + and emit a move inside the if-then-else, hoping that the |
| + whole if-then-else can be converted to conditional |
| + execution. */ |
| + SET_DEST (op_pattern) = scratch_reg; |
| + start_sequence (); |
| + new_insn = emit_insn (op_pattern); |
| + seq = get_insns(); |
| + end_sequence (); |
| + |
| + /* Check again that the insn is valid. For some insns the insn might |
| +                 become invalid if the destination register is changed, e.g. for |
| +                 mulacc operations. */ |
| + if ( insn_invalid_p (new_insn) ) |
| + return NULL_RTX; |
| + |
| + emit_insn_before_setloc (seq, cmp_insn, INSN_LOCATOR (insn)); |
| + |
| + if (dump_file) |
| + fprintf (dump_file, |
| + "\nMoving INSN %d out of IF-block by adding INSN %d...\n", |
| + INSN_UID (insn), INSN_UID (new_insn)); |
| + |
| + ce_info->extra_move_insns[ce_info->num_extra_move_insns] = insn; |
| + ce_info->moved_insns[ce_info->num_extra_move_insns] = new_insn; |
| + XEXP (op, 1) = scratch_reg; |
| + /* Force the insn to be recognized again. */ |
| + INSN_CODE (insn) = -1; |
| + |
| + /* Move REG_DEAD notes to the moved insn. */ |
| + prev_link = NULL_RTX; |
| + for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) |
| + { |
| + if (REG_NOTE_KIND (link) == REG_DEAD) |
| + { |
| + /* Add the REG_DEAD note to the new insn. */ |
| + rtx dead_reg = XEXP (link, 0); |
| + REG_NOTES (new_insn) = gen_rtx_EXPR_LIST (REG_DEAD, dead_reg, REG_NOTES (new_insn)); |
| + /* Remove the REG_DEAD note from the insn we convert to a move. */ |
| + if ( prev_link ) |
| + XEXP (prev_link, 1) = XEXP (link, 1); |
| + else |
| + REG_NOTES (insn) = XEXP (link, 1); |
| + } |
| + else |
| + { |
| + prev_link = link; |
| + } |
| + } |
| + /* Add a REG_DEAD note to signal that the scratch register is dead. */ |
| + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, scratch_reg, REG_NOTES (insn)); |
| + |
| + /* If this is the first change in this IF-block then |
| + signal that we have made a change. */ |
| + if ( ce_info->num_cond_clobber_insns == 0 |
| + && ce_info->num_extra_move_insns == 0 ) |
| + *num_true_changes += 1; |
| + |
| + ce_info->num_extra_move_insns++; |
| + return op; |
| + } |
| + } |
| + |
| +      /* We failed to fix up the insns, so this if-then-else cannot be made |
| + conditional. Just return NULL_RTX so that the if-then-else conversion |
| + for this if-then-else will be cancelled. */ |
| + return NULL_RTX; |
| + } |
| + end_sequence (); |
| + return op; |
| + } |
| + |
| + /* Signal that we have started if conversion after reload, which means |
| + that it should be safe to split all the predicable clobber insns which |
| + did not become cond_exec back into a simpler form if possible. */ |
| + cfun->machine->ifcvt_after_reload = 1; |
| + |
| + return pattern; |
| +} |
| + |
| + |
| +void |
| +avr32_ifcvt_modify_cancel ( ce_if_block_t *ce_info, |
| + int *num_true_changes) |
| +{ |
| + int n; |
| + |
| + if ( ce_info->num_extra_move_insns > 0 |
| + && ce_info->num_cond_clobber_insns == 0) |
| +    /* Signal that we did not make any changes after all. */ |
| + *num_true_changes -= 1; |
| + |
| + /* Remove any inserted move insns. */ |
| + for ( n = 0; n < ce_info->num_extra_move_insns; n++ ) |
| + { |
| + rtx link, prev_link; |
| + |
| +      /* Remove the REG_DEAD note since the scratch register is not needed after all. */ |
| + prev_link = NULL_RTX; |
| + for (link = REG_NOTES (ce_info->extra_move_insns[n]); link; link = XEXP (link, 1)) |
| + { |
| + if (REG_NOTE_KIND (link) == REG_DEAD) |
| + { |
| + if ( prev_link ) |
| + XEXP (prev_link, 1) = XEXP (link, 1); |
| + else |
| + REG_NOTES (ce_info->extra_move_insns[n]) = XEXP (link, 1); |
| + } |
| + else |
| + { |
| + prev_link = link; |
| + } |
| + } |
| + |
| + /* Revert all reg_notes for the moved insn. */ |
| + for (link = REG_NOTES (ce_info->moved_insns[n]); link; link = XEXP (link, 1)) |
| + { |
| + REG_NOTES (ce_info->extra_move_insns[n]) = gen_rtx_EXPR_LIST (REG_NOTE_KIND (link), |
| + XEXP (link, 0), |
| + REG_NOTES (ce_info->extra_move_insns[n])); |
| + } |
| + |
| + /* Remove the moved insn. */ |
| + remove_insn ( ce_info->moved_insns[n] ); |
| + } |
| +} |
| + |
| +/* Function returning TRUE if INSN with OPERANDS is a splittable |
| + conditional immediate clobber insn. We assume that the insn is |
| +   already a conditional immediate clobber insn and do not check |
| + for that. */ |
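| +/* Roughly: the insn is splittable whenever operands[0] and operands[1] |
| +   are the same register, and otherwise only for a plus or minus whose |
| +   immediate still fits the 16-bit constraint (Is16/Ks16); e.g. a |
| +   conditional logical operation writing to a third register is not |
| +   splittable. */ |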
| +int |
| +avr32_cond_imm_clobber_splittable (rtx insn, |
| + rtx operands[]) |
| +{ |
| + if ( (REGNO (operands[0]) != REGNO (operands[1])) |
| + && (logical_binary_operator (SET_SRC (XVECEXP (PATTERN (insn),0,0)), VOIDmode) |
| + || (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == PLUS |
| + && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'I', "Is16")) |
| + || (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == MINUS |
| + && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks16"))) ) |
| + return FALSE; |
| + |
| + return TRUE; |
| +} |
| + |
| +/* Function for getting an integer value from a const_int or const_double |
| + expression regardless of the HOST_WIDE_INT size. Each target cpu word |
| + will be put into the val array where the LSW will be stored at the lowest |
| + address and so forth. Assumes that const_expr is either a const_int or |
| + const_double. Only valid for modes which have sizes that are a multiple |
| + of the word size. |
| +*/ |
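| +/* Illustrative example, assuming a 64-bit HOST_WIDE_INT host: for DImode |
| +   and the CONST_INT 0x0000000100000002, val[0] becomes 0x2 (the LSW) and |
| +   val[1] becomes 0x1 (the MSW). */ |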
| +void |
| +avr32_get_intval (enum machine_mode mode, |
| + rtx const_expr, |
| + HOST_WIDE_INT *val) |
| +{ |
| + int words_in_mode = GET_MODE_SIZE (mode)/UNITS_PER_WORD; |
| + const int words_in_const_int = HOST_BITS_PER_WIDE_INT / BITS_PER_WORD; |
| + |
| + if ( GET_CODE(const_expr) == CONST_DOUBLE ){ |
| + HOST_WIDE_INT hi = CONST_DOUBLE_HIGH(const_expr); |
| + HOST_WIDE_INT lo = CONST_DOUBLE_LOW(const_expr); |
| + /* Evaluate hi and lo values of const_double. */ |
| + avr32_get_intval (mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0), |
| + GEN_INT (lo), |
| + &val[0]); |
| + avr32_get_intval (mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0), |
| + GEN_INT (hi), |
| + &val[words_in_const_int]); |
| + } else if ( GET_CODE(const_expr) == CONST_INT ){ |
| + HOST_WIDE_INT value = INTVAL(const_expr); |
| + int word; |
| + for ( word = 0; (word < words_in_mode) && (word < words_in_const_int); word++ ){ |
| + /* Shift word up to the MSW and shift down again to extract the |
| + word and sign-extend. */ |
| + int lshift = (words_in_const_int - word - 1) * BITS_PER_WORD; |
| + int rshift = (words_in_const_int-1) * BITS_PER_WORD; |
| + val[word] = (value << lshift) >> rshift; |
| + } |
| + |
| + for ( ; word < words_in_mode; word++ ){ |
| + /* Just put the sign bits in the remaining words. */ |
| + val[word] = value < 0 ? -1 : 0; |
| + } |
| + } |
| +} |
| + |
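| +/* Split a constant of mode MODE into several constants of mode NEW_MODE, |
| +   stored into SPLIT_EXPR with the least significant part first. For |
| +   example, splitting the DImode constant 0x0000000100000002 into SImode |
| +   pieces yields split_expr[0] == 2 and split_expr[1] == 1. */ |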
| +void |
| +avr32_split_const_expr (enum machine_mode mode, |
| + enum machine_mode new_mode, |
| + rtx expr, |
| + rtx *split_expr) |
| +{ |
| + int i, word; |
| + int words_in_intval = GET_MODE_SIZE (mode)/UNITS_PER_WORD; |
| + int words_in_split_values = GET_MODE_SIZE (new_mode)/UNITS_PER_WORD; |
| + const int words_in_const_int = HOST_BITS_PER_WIDE_INT / BITS_PER_WORD; |
| +  HOST_WIDE_INT *val = alloca (words_in_intval * sizeof (HOST_WIDE_INT)); |
| + |
| + avr32_get_intval (mode, expr, val); |
| + |
| + for ( i=0; i < (words_in_intval/words_in_split_values); i++ ) |
| + { |
| + HOST_WIDE_INT value_lo = 0, value_hi = 0; |
| + for ( word = 0; word < words_in_split_values; word++ ) |
| + { |
| + if ( word >= words_in_const_int ) |
| + value_hi |= ((val[i * words_in_split_values + word] & |
| + (((HOST_WIDE_INT)1 << BITS_PER_WORD)-1)) |
| + << (BITS_PER_WORD * (word - words_in_const_int))); |
| + else |
| + value_lo |= ((val[i * words_in_split_values + word] & |
| + (((HOST_WIDE_INT)1 << BITS_PER_WORD)-1)) |
| + << (BITS_PER_WORD * word)); |
| + } |
| + split_expr[i] = immed_double_const(value_lo, value_hi, new_mode); |
| + } |
| +} |
| + |
| + |
| +/* Set up library functions to comply with the AVR32 ABI. */ |
| + |
| +static void |
| +avr32_init_libfuncs (void) |
| +{ |
| + /* Convert gcc run-time function names to AVR32 ABI names */ |
| + |
| + /* Double-precision floating-point arithmetic. */ |
| + set_optab_libfunc (neg_optab, DFmode, NULL); |
| + |
| + /* Double-precision comparisons. */ |
| + set_optab_libfunc (eq_optab, DFmode, "__avr32_f64_cmp_eq"); |
| + set_optab_libfunc (ne_optab, DFmode, NULL); |
| + set_optab_libfunc (lt_optab, DFmode, "__avr32_f64_cmp_lt"); |
| + set_optab_libfunc (le_optab, DFmode, NULL); |
| + set_optab_libfunc (ge_optab, DFmode, "__avr32_f64_cmp_ge"); |
| + set_optab_libfunc (gt_optab, DFmode, NULL); |
| + |
| + /* Single-precision floating-point arithmetic. */ |
| + set_optab_libfunc (smul_optab, SFmode, "__avr32_f32_mul"); |
| + set_optab_libfunc (neg_optab, SFmode, NULL); |
| + |
| + /* Single-precision comparisons. */ |
| + set_optab_libfunc (eq_optab, SFmode, "__avr32_f32_cmp_eq"); |
| + set_optab_libfunc (ne_optab, SFmode, NULL); |
| + set_optab_libfunc (lt_optab, SFmode, "__avr32_f32_cmp_lt"); |
| + set_optab_libfunc (le_optab, SFmode, NULL); |
| + set_optab_libfunc (ge_optab, SFmode, "__avr32_f32_cmp_ge"); |
| + set_optab_libfunc (gt_optab, SFmode, NULL); |
| + |
| + /* Floating-point to integer conversions. */ |
| + set_conv_libfunc (sfix_optab, SImode, DFmode, "__avr32_f64_to_s32"); |
| + set_conv_libfunc (ufix_optab, SImode, DFmode, "__avr32_f64_to_u32"); |
| + set_conv_libfunc (sfix_optab, DImode, DFmode, "__avr32_f64_to_s64"); |
| + set_conv_libfunc (ufix_optab, DImode, DFmode, "__avr32_f64_to_u64"); |
| + set_conv_libfunc (sfix_optab, SImode, SFmode, "__avr32_f32_to_s32"); |
| + set_conv_libfunc (ufix_optab, SImode, SFmode, "__avr32_f32_to_u32"); |
| + set_conv_libfunc (sfix_optab, DImode, SFmode, "__avr32_f32_to_s64"); |
| + set_conv_libfunc (ufix_optab, DImode, SFmode, "__avr32_f32_to_u64"); |
| + |
| + /* Conversions between floating types. */ |
| + set_conv_libfunc (trunc_optab, SFmode, DFmode, "__avr32_f64_to_f32"); |
| + set_conv_libfunc (sext_optab, DFmode, SFmode, "__avr32_f32_to_f64"); |
| + |
| + /* Integer to floating-point conversions. Table 8. */ |
| + set_conv_libfunc (sfloat_optab, DFmode, SImode, "__avr32_s32_to_f64"); |
| + set_conv_libfunc (sfloat_optab, DFmode, DImode, "__avr32_s64_to_f64"); |
| + set_conv_libfunc (sfloat_optab, SFmode, SImode, "__avr32_s32_to_f32"); |
| + set_conv_libfunc (sfloat_optab, SFmode, DImode, "__avr32_s64_to_f32"); |
| + set_conv_libfunc (ufloat_optab, DFmode, SImode, "__avr32_u32_to_f64"); |
| + set_conv_libfunc (ufloat_optab, SFmode, SImode, "__avr32_u32_to_f32"); |
| + /* TODO: Add these to gcc library functions */ |
| + //set_conv_libfunc (ufloat_optab, DFmode, DImode, NULL); |
| + //set_conv_libfunc (ufloat_optab, SFmode, DImode, NULL); |
| + |
| + /* Long long. Table 9. */ |
| + set_optab_libfunc (smul_optab, DImode, "__avr32_mul64"); |
| + set_optab_libfunc (sdiv_optab, DImode, "__avr32_sdiv64"); |
| + set_optab_libfunc (udiv_optab, DImode, "__avr32_udiv64"); |
| + set_optab_libfunc (smod_optab, DImode, "__avr32_smod64"); |
| + set_optab_libfunc (umod_optab, DImode, "__avr32_umod64"); |
| + set_optab_libfunc (ashl_optab, DImode, "__avr32_lsl64"); |
| + set_optab_libfunc (lshr_optab, DImode, "__avr32_lsr64"); |
| + set_optab_libfunc (ashr_optab, DImode, "__avr32_asr64"); |
| + |
| + /* Floating point library functions which have fast versions. */ |
| + if ( TARGET_FAST_FLOAT ) |
| + { |
| + set_optab_libfunc (sdiv_optab, DFmode, "__avr32_f64_div_fast"); |
| + set_optab_libfunc (smul_optab, DFmode, "__avr32_f64_mul_fast"); |
| + set_optab_libfunc (add_optab, DFmode, "__avr32_f64_add_fast"); |
| + set_optab_libfunc (sub_optab, DFmode, "__avr32_f64_sub_fast"); |
| + set_optab_libfunc (add_optab, SFmode, "__avr32_f32_add_fast"); |
| + set_optab_libfunc (sub_optab, SFmode, "__avr32_f32_sub_fast"); |
| + set_optab_libfunc (sdiv_optab, SFmode, "__avr32_f32_div_fast"); |
| + } |
| + else |
| + { |
| + set_optab_libfunc (sdiv_optab, DFmode, "__avr32_f64_div"); |
| + set_optab_libfunc (smul_optab, DFmode, "__avr32_f64_mul"); |
| + set_optab_libfunc (add_optab, DFmode, "__avr32_f64_add"); |
| + set_optab_libfunc (sub_optab, DFmode, "__avr32_f64_sub"); |
| + set_optab_libfunc (add_optab, SFmode, "__avr32_f32_add"); |
| + set_optab_libfunc (sub_optab, SFmode, "__avr32_f32_sub"); |
| + set_optab_libfunc (sdiv_optab, SFmode, "__avr32_f32_div"); |
| + } |
| +} |
| --- /dev/null |
| +++ b/gcc/config/avr32/avr32-elf.h |
| @@ -0,0 +1,84 @@ |
| +/* |
| + Elf specific definitions. |
| + Copyright 2003-2006 Atmel Corporation. |
| + |
| + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| + |
| + This file is part of GCC. |
| + |
| + This program is free software; you can redistribute it and/or modify |
| + it under the terms of the GNU General Public License as published by |
| + the Free Software Foundation; either version 2 of the License, or |
| + (at your option) any later version. |
| + |
| + This program is distributed in the hope that it will be useful, |
| + but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + GNU General Public License for more details. |
| + |
| + You should have received a copy of the GNU General Public License |
| + along with this program; if not, write to the Free Software |
| + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ |
| + |
| + |
| +/***************************************************************************** |
| + * Controlling the Compilation Driver, 'gcc' |
| + *****************************************************************************/ |
| + |
| +/* Run-time Target Specification. */ |
| +#undef TARGET_VERSION |
| +#define TARGET_VERSION fputs (" (AVR32 GNU with ELF)", stderr); |
| + |
| +/* |
| +Another C string constant used much like LINK_SPEC. The |
| +difference between the two is that STARTFILE_SPEC is used at |
| +the very beginning of the command given to the linker. |
| + |
| +If this macro is not defined, a default is provided that loads the |
| +standard C startup file from the usual place. See gcc.c. |
| +*/ |
| +#undef STARTFILE_SPEC |
| +#define STARTFILE_SPEC "crt0%O%s crti%O%s crtbegin%O%s" |
| + |
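| +/* The link spec below maps -mpart=/-mcpu= options onto the corresponding |
| +   linker emulation and enables linker relaxation when optimizing. For |
| +   instance, compiling with '-O2 -mpart=uc3a0512' would pass |
| +   '--relax -mavr32elf_uc3a0512' to the linker, while -O0, -O1 or |
| +   -mno-relax leaves --relax out. */ |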
| +#undef LINK_SPEC |
| +#define LINK_SPEC "%{muse-oscall:--defsym __do_not_use_oscall_coproc__=0} %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}} %{mpart=uc3a3revd:-mavr32elf_uc3a3256s;:%{mpart=*:-mavr32elf_%*}} %{mcpu=*:-mavr32elf_%*}" |
| + |
| + |
| +/* |
| +Another C string constant used much like LINK_SPEC. The |
| +difference between the two is that ENDFILE_SPEC is used at |
| +the very end of the command given to the linker. |
| + |
| +Do not define this macro if it does not need to do anything. |
| +*/ |
| +#undef ENDFILE_SPEC |
| +#define ENDFILE_SPEC "crtend%O%s crtn%O%s" |
| + |
| + |
| +/* Target CPU builtins. */ |
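| +/* For instance, compiling for a part with an AVR32A microarchitecture and |
| +   DSP support defines __avr32__, __AVR32__, __AVR32_ELF__, the part- and |
| +   arch-specific macros, plus __AVR32_AVR32A__ and __AVR32_HAS_DSP__. */ |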
| +#define TARGET_CPU_CPP_BUILTINS() \ |
| + do \ |
| + { \ |
| + builtin_define ("__avr32__"); \ |
| + builtin_define ("__AVR32__"); \ |
| + builtin_define ("__AVR32_ELF__"); \ |
| + builtin_define (avr32_part->macro); \ |
| + builtin_define (avr32_arch->macro); \ |
| + if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A) \ |
| + builtin_define ("__AVR32_AVR32A__"); \ |
| + else \ |
| + builtin_define ("__AVR32_AVR32B__"); \ |
| + if (TARGET_UNALIGNED_WORD) \ |
| + builtin_define ("__AVR32_HAS_UNALIGNED_WORD__"); \ |
| + if (TARGET_SIMD) \ |
| + builtin_define ("__AVR32_HAS_SIMD__"); \ |
| + if (TARGET_DSP) \ |
| + builtin_define ("__AVR32_HAS_DSP__"); \ |
| + if (TARGET_RMW) \ |
| + builtin_define ("__AVR32_HAS_RMW__"); \ |
| + if (TARGET_BRANCH_PRED) \ |
| + builtin_define ("__AVR32_HAS_BRANCH_PRED__"); \ |
| + if (TARGET_FAST_FLOAT) \ |
| + builtin_define ("__AVR32_FAST_FLOAT__"); \ |
| + } \ |
| + while (0) |
| --- /dev/null |
| +++ b/gcc/config/avr32/avr32.h |
| @@ -0,0 +1,3347 @@ |
| +/* |
| + Definitions of target machine for AVR32. |
| + Copyright 2003-2006 Atmel Corporation. |
| + |
| + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| + Initial porting by Anders �dland. |
| + |
| + This file is part of GCC. |
| + |
| + This program is free software; you can redistribute it and/or modify |
| + it under the terms of the GNU General Public License as published by |
| + the Free Software Foundation; either version 2 of the License, or |
| + (at your option) any later version. |
| + |
| + This program is distributed in the hope that it will be useful, |
| + but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + GNU General Public License for more details. |
| + |
| + You should have received a copy of the GNU General Public License |
| + along with this program; if not, write to the Free Software |
| + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ |
| + |
| +#ifndef GCC_AVR32_H |
| +#define GCC_AVR32_H |
| + |
| + |
| +#ifndef OBJECT_FORMAT_ELF |
| +#error avr32.h included before elfos.h |
| +#endif |
| + |
| +#ifndef LOCAL_LABEL_PREFIX |
| +#define LOCAL_LABEL_PREFIX "." |
| +#endif |
| + |
| +#ifndef SUBTARGET_CPP_SPEC |
| +#define SUBTARGET_CPP_SPEC "-D__ELF__" |
| +#endif |
| + |
| + |
| +extern struct rtx_def *avr32_compare_op0; |
| +extern struct rtx_def *avr32_compare_op1; |
| + |
| + |
| +extern struct rtx_def *avr32_acc_cache; |
| + |
| +/* cache instruction op5 codes */ |
| +#define AVR32_CACHE_INVALIDATE_ICACHE 1 |
| + |
| +/* These bits describe the different types of function supported |
| +   by the AVR32 backend. They are exclusive, i.e. a function cannot be both a |
| + normal function and an interworked function, for example. Knowing the |
| + type of a function is important for determining its prologue and |
| + epilogue sequences. |
| + Note value 7 is currently unassigned. Also note that the interrupt |
| + function types all have bit 2 set, so that they can be tested for easily. |
| + Note that 0 is deliberately chosen for AVR32_FT_UNKNOWN so that when the |
| + machine_function structure is initialized (to zero) func_type will |
| + default to unknown. This will force the first use of avr32_current_func_type |
| + to call avr32_compute_func_type. */ |
| +#define AVR32_FT_UNKNOWN 0 /* Type has not yet been determined. |
| + */ |
| +#define AVR32_FT_NORMAL 1 /* Your normal, straightforward |
| + function. */ |
| +#define AVR32_FT_ACALL 2 /* An acall function. */ |
| +#define AVR32_FT_EXCEPTION_HANDLER 3 /* A C++ exception handler. */ |
| +#define AVR32_FT_ISR_FULL 4 /* A fully shadowed interrupt mode. */ |
| +#define AVR32_FT_ISR_HALF 5 /* A half shadowed interrupt mode. */ |
| +#define AVR32_FT_ISR_NONE 6 /* No shadow registers. */ |
| + |
| +#define AVR32_FT_TYPE_MASK ((1 << 3) - 1) |
| + |
| +/* In addition functions can have several type modifiers, |
| + outlined by these bit masks: */ |
| +#define AVR32_FT_INTERRUPT (1 << 2) /* Note overlap with FT_ISR |
| + and above. */ |
| +#define AVR32_FT_NAKED (1 << 3) /* No prologue or epilogue. */ |
| +#define AVR32_FT_VOLATILE (1 << 4) /* Does not return. */ |
| +#define AVR32_FT_NESTED (1 << 5) /* Embedded inside another |
| + func. */ |
| + |
| +/* Some macros to test these flags. */ |
| +#define AVR32_FUNC_TYPE(t) (t & AVR32_FT_TYPE_MASK) |
| +#define IS_INTERRUPT(t) (t & AVR32_FT_INTERRUPT) |
| +#define IS_VOLATILE(t) (t & AVR32_FT_VOLATILE) |
| +#define IS_NAKED(t) (t & AVR32_FT_NAKED) |
| +#define IS_NESTED(t) (t & AVR32_FT_NESTED) |
| + |
| +#define SYMBOL_FLAG_RMW_ADDR_SHIFT SYMBOL_FLAG_MACH_DEP_SHIFT |
| +#define SYMBOL_REF_RMW_ADDR(RTX) \ |
| + ((SYMBOL_REF_FLAGS (RTX) & (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT)) != 0) |
| + |
| + |
| +typedef struct minipool_labels |
| +GTY ((chain_next ("%h.next"), chain_prev ("%h.prev"))) |
| +{ |
| + rtx label; |
| + struct minipool_labels *prev; |
| + struct minipool_labels *next; |
| +} minipool_labels; |
| + |
| +/* A C structure for machine-specific, per-function data. |
| + This is added to the cfun structure. */ |
| + |
| +typedef struct machine_function |
| +GTY (()) |
| +{ |
| + /* Records the type of the current function. */ |
| + unsigned long func_type; |
| +  /* List of minipool labels, used for checking if a code label is valid in a |
| +     memory expression. */ |
| + minipool_labels *minipool_label_head; |
| + minipool_labels *minipool_label_tail; |
| + int ifcvt_after_reload; |
| +} machine_function; |
| + |
| +/* Initialize data used by insn expanders. This is called from init_emit, |
| + once for every function before code is generated. */ |
| +#define INIT_EXPANDERS avr32_init_expanders () |
| + |
| +/****************************************************************************** |
| + * SPECS |
| + *****************************************************************************/ |
| + |
| +#ifndef ASM_SPEC |
| +#define ASM_SPEC "%{fpic:--pic} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{march=ucr2nomul:-march=ucr2;:%{march=*:-march=%*}} %{mpart=uc3a3revd:-mpart=uc3a3256s;:%{mpart=*:-mpart=%*}}" |
| +#endif |
| + |
| +#ifndef MULTILIB_DEFAULTS |
| +#define MULTILIB_DEFAULTS { "march=ap", "" } |
| +#endif |
| + |
| +/****************************************************************************** |
| + * Run-time Target Specification |
| + *****************************************************************************/ |
| +#ifndef TARGET_VERSION |
| +#define TARGET_VERSION fprintf(stderr, " (AVR32, GNU assembler syntax)"); |
| +#endif |
| + |
| + |
| +/* Part types. Keep this in sync with the order of avr32_part_types in avr32.c. */ |
| +enum part_type |
| +{ |
| + PART_TYPE_AVR32_NONE, |
| + PART_TYPE_AVR32_AP7000, |
| + PART_TYPE_AVR32_AP7001, |
| + PART_TYPE_AVR32_AP7002, |
| + PART_TYPE_AVR32_AP7200, |
| + PART_TYPE_AVR32_UC3A0128, |
| + PART_TYPE_AVR32_UC3A0256, |
| + PART_TYPE_AVR32_UC3A0512, |
| + PART_TYPE_AVR32_UC3A0512ES, |
| + PART_TYPE_AVR32_UC3A1128, |
| + PART_TYPE_AVR32_UC3A1256, |
| + PART_TYPE_AVR32_UC3A1512, |
| + PART_TYPE_AVR32_UC3A1512ES, |
| + PART_TYPE_AVR32_UC3A3REVD, |
| + PART_TYPE_AVR32_UC3A364, |
| + PART_TYPE_AVR32_UC3A364S, |
| + PART_TYPE_AVR32_UC3A3128, |
| + PART_TYPE_AVR32_UC3A3128S, |
| + PART_TYPE_AVR32_UC3A3256, |
| + PART_TYPE_AVR32_UC3A3256S, |
| + PART_TYPE_AVR32_UC3B064, |
| + PART_TYPE_AVR32_UC3B0128, |
| + PART_TYPE_AVR32_UC3B0256, |
| + PART_TYPE_AVR32_UC3B0256ES, |
| + PART_TYPE_AVR32_UC3B164, |
| + PART_TYPE_AVR32_UC3B1128, |
| + PART_TYPE_AVR32_UC3B1256, |
| + PART_TYPE_AVR32_UC3B1256ES |
| +}; |
| + |
| +/* Microarchitectures. */ |
| +enum microarchitecture_type |
| +{ |
| + UARCH_TYPE_AVR32A, |
| + UARCH_TYPE_AVR32B, |
| + UARCH_TYPE_NONE |
| +}; |
| + |
| +/* Architecture types which specify the pipeline. |
| + Keep this in sync with avr32_arch_types in avr32.c |
| + and the pipeline attribute in avr32.md */ |
| +enum architecture_type |
| +{ |
| + ARCH_TYPE_AVR32_AP, |
| + ARCH_TYPE_AVR32_UCR1, |
| + ARCH_TYPE_AVR32_UCR2, |
| + ARCH_TYPE_AVR32_UCR2NOMUL, |
| + ARCH_TYPE_AVR32_NONE |
| +}; |
| + |
| +/* Flag specifying if the cpu has support for DSP instructions.*/ |
| +#define FLAG_AVR32_HAS_DSP (1 << 0) |
| +/* Flag specifying if the cpu has support for Read-Modify-Write |
| + instructions.*/ |
| +#define FLAG_AVR32_HAS_RMW (1 << 1) |
| +/* Flag specifying if the cpu has support for SIMD instructions. */ |
| +#define FLAG_AVR32_HAS_SIMD (1 << 2) |
| +/* Flag specifying if the cpu has support for unaligned memory word access. */ |
| +#define FLAG_AVR32_HAS_UNALIGNED_WORD (1 << 3) |
| +/* Flag specifying if the cpu has support for branch prediction. */ |
| +#define FLAG_AVR32_HAS_BRANCH_PRED (1 << 4) |
| +/* Flag specifying if the cpu has support for a return stack. */ |
| +#define FLAG_AVR32_HAS_RETURN_STACK (1 << 5) |
| +/* Flag specifying if the cpu has caches. */ |
| +#define FLAG_AVR32_HAS_CACHES (1 << 6) |
| +/* Flag specifying if the cpu has support for v2 insns. */ |
| +#define FLAG_AVR32_HAS_V2_INSNS (1 << 7) |
| +/* Flag specifying that the cpu has buggy mul insns. */ |
| +#define FLAG_AVR32_HAS_NO_MUL_INSNS (1 << 8) |
| + |
| +/* Structure for holding information about different avr32 CPUs/parts */ |
| +struct part_type_s |
| +{ |
| + const char *const name; |
| + enum part_type part_type; |
| + enum architecture_type arch_type; |
| + /* Must lie outside user's namespace. NULL == no macro. */ |
| + const char *const macro; |
| +}; |
| + |
| +/* Structure for holding information about different avr32 pipeline |
| + architectures. */ |
| +struct arch_type_s |
| +{ |
| + const char *const name; |
| + enum architecture_type arch_type; |
| + enum microarchitecture_type uarch_type; |
| + const unsigned long feature_flags; |
| + /* Must lie outside user's namespace. NULL == no macro. */ |
| + const char *const macro; |
| +}; |
| + |
| +extern const struct part_type_s *avr32_part; |
| +extern const struct arch_type_s *avr32_arch; |
| + |
| +#define TARGET_SIMD (avr32_arch->feature_flags & FLAG_AVR32_HAS_SIMD) |
| +#define TARGET_DSP (avr32_arch->feature_flags & FLAG_AVR32_HAS_DSP) |
| +#define TARGET_RMW (avr32_arch->feature_flags & FLAG_AVR32_HAS_RMW) |
| +#define TARGET_UNALIGNED_WORD (avr32_arch->feature_flags & FLAG_AVR32_HAS_UNALIGNED_WORD) |
| +#define TARGET_BRANCH_PRED (avr32_arch->feature_flags & FLAG_AVR32_HAS_BRANCH_PRED) |
| +#define TARGET_RETURN_STACK (avr32_arch->feature_flags & FLAG_AVR32_HAS_RETURN_STACK) |
| +#define TARGET_V2_INSNS (avr32_arch->feature_flags & FLAG_AVR32_HAS_V2_INSNS) |
| +#define TARGET_CACHES (avr32_arch->feature_flags & FLAG_AVR32_HAS_CACHES) |
| +#define TARGET_NO_MUL_INSNS (avr32_arch->feature_flags & FLAG_AVR32_HAS_NO_MUL_INSNS) |
| +#define TARGET_ARCH_AP (avr32_arch->arch_type == ARCH_TYPE_AVR32_AP) |
| +#define TARGET_ARCH_UCR1 (avr32_arch->arch_type == ARCH_TYPE_AVR32_UCR1) |
| +#define TARGET_ARCH_UCR2 (avr32_arch->arch_type == ARCH_TYPE_AVR32_UCR2) |
| +#define TARGET_ARCH_UC (TARGET_ARCH_UCR1 || TARGET_ARCH_UCR2) |
| +#define TARGET_UARCH_AVR32A (avr32_arch->uarch_type == UARCH_TYPE_AVR32A) |
| +#define TARGET_UARCH_AVR32B (avr32_arch->uarch_type == UARCH_TYPE_AVR32B) |
| + |
| +#define CAN_DEBUG_WITHOUT_FP |
| + |
| + |
| + |
| + |
| +/****************************************************************************** |
| + * Storage Layout |
| + *****************************************************************************/ |
| + |
| +/* |
| +Define this macro to have the value 1 if the most significant bit in a |
| +byte has the lowest number; otherwise define it to have the value zero. |
| +This means that bit-field instructions count from the most significant |
| +bit. If the machine has no bit-field instructions, then this must still |
| +be defined, but it doesn't matter which value it is defined to. This |
| +macro need not be a constant. |
| + |
| +This macro does not affect the way structure fields are packed into |
| +bytes or words; that is controlled by BYTES_BIG_ENDIAN. |
| +*/ |
| +#define BITS_BIG_ENDIAN 0 |
| + |
| +/* |
| +Define this macro to have the value 1 if the most significant byte in a |
| +word has the lowest number. This macro need not be a constant. |
| +*/ |
| +/* |
| +  Data is stored in a big-endian way. |
| +*/ |
| +#define BYTES_BIG_ENDIAN 1 |
| + |
| +/* |
| +Define this macro to have the value 1 if, in a multiword object, the |
| +most significant word has the lowest number. This applies to both |
| +memory locations and registers; GCC fundamentally assumes that the |
| +order of words in memory is the same as the order in registers. This |
| +macro need not be a constant. |
| +*/ |
| +/* |
| +  Data is stored in a big-endian way. |
| +*/ |
| +#define WORDS_BIG_ENDIAN 1 |
| + |
| +/* |
| +Define this macro if WORDS_BIG_ENDIAN is not constant. This must be a |
| +constant value with the same meaning as WORDS_BIG_ENDIAN, which will be |
| +used only when compiling libgcc2.c. Typically the value will be set |
| +based on preprocessor defines. |
| +*/ |
| +#define LIBGCC2_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN |
| + |
| +/* |
| +Define this macro to have the value 1 if DFmode, XFmode or |
| +TFmode floating point numbers are stored in memory with the word |
| +containing the sign bit at the lowest address; otherwise define it to |
| +have the value 0. This macro need not be a constant. |
| + |
| +You need not define this macro if the ordering is the same as for |
| +multi-word integers. |
| +*/ |
| +/* #define FLOAT_WORDS_BIG_ENDIAN 1 */ |
| + |
| +/* |
| +Define this macro to be the number of bits in an addressable storage |
| +unit (byte); normally 8. |
| +*/ |
| +#define BITS_PER_UNIT 8 |
| + |
| +/* |
| +Number of bits in a word; normally 32. |
| +*/ |
| +#define BITS_PER_WORD 32 |
| + |
| +/* |
| +Maximum number of bits in a word. If this is undefined, the default is |
| +BITS_PER_WORD. Otherwise, it is the constant value that is the |
| +largest value that BITS_PER_WORD can have at run-time. |
| +*/ |
| +/* MAX_BITS_PER_WORD not defined*/ |
| + |
| +/* |
| +Number of storage units in a word; normally 4. |
| +*/ |
| +#define UNITS_PER_WORD 4 |
| + |
| +/* |
| +Minimum number of units in a word. If this is undefined, the default is |
| +UNITS_PER_WORD. Otherwise, it is the constant value that is the |
| +smallest value that UNITS_PER_WORD can have at run-time. |
| +*/ |
| +/* MIN_UNITS_PER_WORD not defined */ |
| + |
| +/* |
| +Width of a pointer, in bits. You must specify a value no wider than the |
| +width of Pmode. If it is not equal to the width of Pmode, |
| +you must define POINTERS_EXTEND_UNSIGNED. |
| +*/ |
| +#define POINTER_SIZE 32 |
| + |
| +/* |
| +A C expression whose value is greater than zero if pointers that need to be |
| +extended from being POINTER_SIZE bits wide to Pmode are to |
| +be zero-extended and zero if they are to be sign-extended. If the value |
| +is less than zero then there must be a "ptr_extend" instruction that |
| +extends a pointer from POINTER_SIZE to Pmode. |
| + |
| +You need not define this macro if the POINTER_SIZE is equal |
| +to the width of Pmode. |
| +*/ |
| +/* #define POINTERS_EXTEND_UNSIGNED */ |
| + |
| +/* |
| +A macro to update M and UNSIGNEDP when an object whose type |
| +is TYPE and which has the specified mode and signedness is to be |
| +stored in a register. This macro is only called when TYPE is a |
| +scalar type. |
| + |
| +On most RISC machines, which only have operations that operate on a full |
| +register, define this macro to set M to word_mode if |
| +M is an integer mode narrower than BITS_PER_WORD. In most |
| +cases, only integer modes should be widened because wider-precision |
| +floating-point operations are usually more expensive than their narrower |
| +counterparts. |
| + |
| +For most machines, the macro definition does not change UNSIGNEDP. |
| +However, some machines have instructions that preferentially handle |
| +either signed or unsigned quantities of certain modes. For example, on |
| +the DEC Alpha, 32-bit loads from memory and 32-bit add instructions |
| +sign-extend the result to 64 bits. On such machines, set |
| +UNSIGNEDP according to which kind of extension is more efficient. |
| + |
| +Do not define this macro if it would never modify M. |
| +*/ |
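| +/* On AVR32, sub-word integer values are simply widened to SImode here; |
| +   QImode values are promoted as unsigned (char is unsigned by default, |
| +   see DEFAULT_SIGNED_CHAR), while HImode values keep their signedness. */ |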
| +#define PROMOTE_MODE(M, UNSIGNEDP, TYPE) \ |
| + { \ |
| + if (GET_MODE_CLASS (M) == MODE_INT \ |
| + && GET_MODE_SIZE (M) < 4) \ |
| + { \ |
| + if (M == QImode) \ |
| + UNSIGNEDP = 1; \ |
| + else if (M == SImode) \ |
| + UNSIGNEDP = 0; \ |
| + (M) = SImode; \ |
| + } \ |
| + } |
| + |
| +#define PROMOTE_FUNCTION_MODE(M, UNSIGNEDP, TYPE) \ |
| + { \ |
| + if (GET_MODE_CLASS (M) == MODE_INT \ |
| + && GET_MODE_SIZE (M) < 4) \ |
| + { \ |
| + (M) = SImode; \ |
| + } \ |
| + } |
| + |
| +/* Define if operations between registers always perform the operation |
| + on the full register even if a narrower mode is specified. */ |
| +#define WORD_REGISTER_OPERATIONS |
| + |
| +/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD |
| + will either zero-extend or sign-extend. The value of this macro should |
| + be the code that says which one of the two operations is implicitly |
| + done, UNKNOWN if not known. */ |
| +#define LOAD_EXTEND_OP(MODE) \ |
| + (((MODE) == QImode) ? ZERO_EXTEND \ |
| + : ((MODE) == HImode) ? SIGN_EXTEND : UNKNOWN) |
| + |
| + |
| +/* |
| +Define this macro if the promotion described by PROMOTE_MODE |
| +should only be performed for outgoing function arguments or |
| +function return values, as specified by PROMOTE_FUNCTION_ARGS |
| +and PROMOTE_FUNCTION_RETURN, respectively. |
| +*/ |
| +/* #define PROMOTE_FOR_CALL_ONLY */ |
| + |
| +/* |
| +Normal alignment required for function parameters on the stack, in |
| +bits. All stack parameters receive at least this much alignment |
| +regardless of data type. On most machines, this is the same as the |
| +size of an integer. |
| +*/ |
| +#define PARM_BOUNDARY 32 |
| + |
| +/* |
| +Define this macro to the minimum alignment enforced by hardware for the |
| +stack pointer on this machine. The definition is a C expression for the |
| +desired alignment (measured in bits). This value is used as a default |
| +if PREFERRED_STACK_BOUNDARY is not defined. On most machines, |
| +this should be the same as PARM_BOUNDARY. |
| +*/ |
| +#define STACK_BOUNDARY 32 |
| + |
| +/* |
| +Define this macro if you wish to preserve a certain alignment for the |
| +stack pointer, greater than what the hardware enforces. The definition |
| +is a C expression for the desired alignment (measured in bits). This |
| +macro must evaluate to a value equal to or larger than |
| +STACK_BOUNDARY. |
| +*/ |
| +#define PREFERRED_STACK_BOUNDARY (TARGET_FORCE_DOUBLE_ALIGN ? 64 : 32 ) |
| + |
| +/* |
| +Alignment required for a function entry point, in bits. |
| +*/ |
| +#define FUNCTION_BOUNDARY 16 |
| + |
| +/* |
| +Biggest alignment that any data type can require on this machine, in bits. |
| +*/ |
| +#define BIGGEST_ALIGNMENT (TARGET_FORCE_DOUBLE_ALIGN ? 64 : 32 ) |
| + |
| +/* |
| +If defined, the smallest alignment, in bits, that can be given to an |
| +object that can be referenced in one operation, without disturbing any |
| +nearby object. Normally, this is BITS_PER_UNIT, but may be larger |
| +on machines that don't have byte or half-word store operations. |
| +*/ |
| +#define MINIMUM_ATOMIC_ALIGNMENT BITS_PER_UNIT |
| + |
| + |
| +/* |
| +An integer expression for the size in bits of the largest integer machine mode that |
| +should actually be used. All integer machine modes of this size or smaller can be |
| +used for structures and unions with the appropriate sizes. If this macro is undefined, |
| +GET_MODE_BITSIZE (DImode) is assumed.*/ |
| +#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode) |
| + |
| + |
| +/* |
| +If defined, a C expression to compute the alignment given to a constant |
| +that is being placed in memory. CONSTANT is the constant and |
| +BASIC_ALIGN is the alignment that the object would ordinarily |
| +have. The value of this macro is used instead of that alignment to |
| +align the object. |
| + |
| +If this macro is not defined, then BASIC_ALIGN is used. |
| + |
| +The typical use of this macro is to increase alignment for string |
| +constants to be word aligned so that strcpy calls that copy |
| +constants can be done inline. |
| +*/ |
| +#define CONSTANT_ALIGNMENT(CONSTANT, BASIC_ALIGN) \ |
| + ((TREE_CODE(CONSTANT) == STRING_CST) ? BITS_PER_WORD : BASIC_ALIGN) |
| + |
| +/* Try to align string to a word. */ |
| +#define DATA_ALIGNMENT(TYPE, ALIGN) \ |
| + ({(TREE_CODE (TYPE) == ARRAY_TYPE \ |
| + && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \ |
| + && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN));}) |
| + |
| +/* Try to align local store strings to a word. */ |
| +#define LOCAL_ALIGNMENT(TYPE, ALIGN) \ |
| + ({(TREE_CODE (TYPE) == ARRAY_TYPE \ |
| + && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \ |
| + && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN));}) |
| + |
| +/* |
| +Define this macro to be the value 1 if instructions will fail to work |
| +if given data not on the nominal alignment. If instructions will merely |
| +go slower in that case, define this macro as 0. |
| +*/ |
| +#define STRICT_ALIGNMENT 1 |
| + |
| +/* |
| +Define this if you wish to imitate the way many other C compilers handle |
| +alignment of bit-fields and the structures that contain them. |
| + |
| +The behavior is that the type written for a bit-field (int, |
| +short, or other integer type) imposes an alignment for the |
| +entire structure, as if the structure really did contain an ordinary |
| +field of that type. In addition, the bit-field is placed within the |
| +structure so that it would fit within such a field, not crossing a |
| +boundary for it. |
| + |
| +Thus, on most machines, a bit-field whose type is written as int |
| +would not cross a four-byte boundary, and would force four-byte |
| +alignment for the whole structure. (The alignment used may not be four |
| +bytes; it is controlled by the other alignment parameters.) |
| + |
| +If the macro is defined, its definition should be a C expression; |
| +a nonzero value for the expression enables this behavior. |
| + |
| +Note that if this macro is not defined, or its value is zero, some |
| +bit-fields may cross more than one alignment boundary. The compiler can |
| +support such references if there are insv, extv, and |
| +extzv insns that can directly reference memory. |
| + |
| +The other known way of making bit-fields work is to define |
| +STRUCTURE_SIZE_BOUNDARY as large as BIGGEST_ALIGNMENT. |
| +Then every structure can be accessed with fullwords. |
| + |
| +Unless the machine has bit-field instructions or you define |
| +STRUCTURE_SIZE_BOUNDARY that way, you must define |
| +PCC_BITFIELD_TYPE_MATTERS to have a nonzero value. |
| + |
| +If your aim is to make GCC use the same conventions for laying out |
| +bit-fields as are used by another compiler, here is how to investigate |
| +what the other compiler does. Compile and run this program: |
| + |
| +struct foo1 |
| +{ |
| + char x; |
| + char :0; |
| + char y; |
| +}; |
| + |
| +struct foo2 |
| +{ |
| + char x; |
| + int :0; |
| + char y; |
| +}; |
| + |
| +main () |
| +{ |
| + printf ("Size of foo1 is %d\n", |
| + sizeof (struct foo1)); |
| + printf ("Size of foo2 is %d\n", |
| + sizeof (struct foo2)); |
| + exit (0); |
| +} |
| + |
| +If this prints 2 and 5, then the compiler's behavior is what you would |
| +get from PCC_BITFIELD_TYPE_MATTERS. |
| +*/ |
| +#define PCC_BITFIELD_TYPE_MATTERS 1 |
| + |
| + |
| +/****************************************************************************** |
| + * Layout of Source Language Data Types |
| + *****************************************************************************/ |
| + |
| +/* |
| +A C expression for the size in bits of the type int on the |
| +target machine. If you don't define this, the default is one word. |
| +*/ |
| +#define INT_TYPE_SIZE 32 |
| + |
| +/* |
| +A C expression for the size in bits of the type short on the |
| +target machine. If you don't define this, the default is half a word. (If |
| +this would be less than one storage unit, it is rounded up to one unit.) |
| +*/ |
| +#define SHORT_TYPE_SIZE 16 |
| + |
| +/* |
| +A C expression for the size in bits of the type long on the |
| +target machine. If you don't define this, the default is one word. |
| +*/ |
| +#define LONG_TYPE_SIZE 32 |
| + |
| + |
| +/* |
| +A C expression for the size in bits of the type long long on the |
| +target machine. If you don't define this, the default is two |
| +words. If you want to support GNU Ada on your machine, the value of this |
| +macro must be at least 64. |
| +*/ |
| +#define LONG_LONG_TYPE_SIZE 64 |
| + |
| +/* |
| +A C expression for the size in bits of the type char on the |
| +target machine. If you don't define this, the default is |
| +BITS_PER_UNIT. |
| +*/ |
| +#define CHAR_TYPE_SIZE 8 |
| + |
| + |
| +/* |
| +A C expression for the size in bits of the C++ type bool and |
| +C99 type _Bool on the target machine. If you don't define |
| +this, and you probably shouldn't, the default is CHAR_TYPE_SIZE. |
| +*/ |
| +#define BOOL_TYPE_SIZE 8 |
| + |
| + |
| +/* |
| +An expression whose value is 1 or 0, according to whether the type |
| +char should be signed or unsigned by default. The user can |
| +always override this default with the options -fsigned-char |
| +and -funsigned-char. |
| +*/ |
| +/* We are using unsigned char */ |
| +#define DEFAULT_SIGNED_CHAR 0 |
| + |
| + |
| +/* |
| +A C expression for a string describing the name of the data type to use |
| +for size values. The typedef name size_t is defined using the |
| +contents of the string. |
| + |
| +The string can contain more than one keyword. If so, separate them with |
| +spaces, and write first any length keyword, then unsigned if |
| +appropriate, and finally int. The string must exactly match one |
| +of the data type names defined in the function |
| +init_decl_processing in the file c-decl.c. You may not |
| +omit int or change the order - that would cause the compiler to |
| +crash on startup. |
| + |
| +If you don't define this macro, the default is "long unsigned int". |
| +*/ |
| +#define SIZE_TYPE "long unsigned int" |
| + |
| +/* |
| +A C expression for a string describing the name of the data type to use |
| +for the result of subtracting two pointers. The typedef name |
| +ptrdiff_t is defined using the contents of the string. See |
| +SIZE_TYPE above for more information. |
| + |
| +If you don't define this macro, the default is "long int". |
| +*/ |
| +#define PTRDIFF_TYPE "long int" |
| + |
| + |
| +/* |
| +A C expression for the size in bits of the data type for wide |
| +characters. This is used in cpp, which cannot make use of |
| +WCHAR_TYPE. |
| +*/ |
| +#define WCHAR_TYPE_SIZE 32 |
| + |
| + |
| +/* |
| +A C expression for a string describing the name of the data type to |
| +use for wide characters passed to printf and returned from |
| +getwc. The typedef name wint_t is defined using the |
| +contents of the string. See SIZE_TYPE above for more |
| +information. |
| + |
| +If you don't define this macro, the default is "unsigned int". |
| +*/ |
| +#define WINT_TYPE "unsigned int" |
| + |
| +/* |
| +A C expression for a string describing the name of the data type that |
| +can represent any value of any standard or extended signed integer type. |
| +The typedef name intmax_t is defined using the contents of the |
| +string. See SIZE_TYPE above for more information. |
| + |
| +If you don't define this macro, the default is the first of |
| +"int", "long int", or "long long int" that has as |
| +much precision as long long int. |
| +*/ |
| +#define INTMAX_TYPE "long long int" |
| + |
| +/* |
| +A C expression for a string describing the name of the data type that |
| +can represent any value of any standard or extended unsigned integer |
| +type. The typedef name uintmax_t is defined using the contents |
| +of the string. See SIZE_TYPE above for more information. |
| + |
| +If you don't define this macro, the default is the first of |
| +"unsigned int", "long unsigned int", or "long long unsigned int" |
| +that has as much precision as long long unsigned int. |
| +*/ |
| +#define UINTMAX_TYPE "long long unsigned int" |
| + |
| + |
| +/****************************************************************************** |
| + * Register Usage |
| + *****************************************************************************/ |
| + |
| +/* Convert from gcc internal register number to register number |
| + used in assembly code */ |
| +#define ASM_REGNUM(reg) (LAST_REGNUM - (reg)) |
| +#define ASM_FP_REGNUM(reg) (LAST_FP_REGNUM - (reg)) |
| + |
| +/* Convert between register number used in assembly to gcc |
| + internal register number */ |
| +#define INTERNAL_REGNUM(reg) (LAST_REGNUM - (reg)) |
| +#define INTERNAL_FP_REGNUM(reg) (LAST_FP_REGNUM - (reg)) |
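| +/* For example, with LAST_REGNUM == 15 the program counter (r15 in the |
| +   assembler) is internal register 0, and INTERNAL_REGNUM (12) == 3 gives |
| +   the internal number used for r12. */ |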
| + |
| +/** Basic Characteristics of Registers **/ |
| + |
| +/* |
| +Number of hardware registers known to the compiler. They receive |
| +numbers 0 through FIRST_PSEUDO_REGISTER-1; thus, the first |
| +pseudo register's number really is assigned the number |
| +FIRST_PSEUDO_REGISTER. |
| +*/ |
| +#define FIRST_PSEUDO_REGISTER (LAST_FP_REGNUM + 1) |
| + |
| +#define FIRST_REGNUM 0 |
| +#define LAST_REGNUM 15 |
| +#define NUM_FP_REGS 16 |
| +#define FIRST_FP_REGNUM 16 |
| +#define LAST_FP_REGNUM (16+NUM_FP_REGS-1) |
| + |
| +/* |
| +An initializer that says which registers are used for fixed purposes |
| +all throughout the compiled code and are therefore not available for |
| +general allocation. These would include the stack pointer, the frame |
| +pointer (except on machines where that can be used as a general |
| +register when no frame pointer is needed), the program counter on |
| +machines where that is considered one of the addressable registers, |
| +and any other numbered register with a standard use. |
| + |
| +This information is expressed as a sequence of numbers, separated by |
| +commas and surrounded by braces. The nth number is 1 if |
| +register n is fixed, 0 otherwise. |
| + |
| +The table initialized from this macro, and the table initialized by |
| +the following one, may be overridden at run time either automatically, |
| +by the actions of the macro CONDITIONAL_REGISTER_USAGE, or by |
| +the user with the command options -ffixed-[reg], |
| +-fcall-used-[reg] and -fcall-saved-[reg]. |
| +*/ |
| + |
| +/* The internal gcc register numbers are reversed |
| + compared to the real register numbers since |
| + gcc expects data types stored over multiple |
| + registers in the register file to be big endian |
| + if the memory layout is big endian. But this |
| + is not the case for avr32 so we fake a big |
| + endian register file. */ |
| + |
| +#define FIXED_REGISTERS { \ |
| + 1, /* Program Counter */ \ |
| + 0, /* Link Register */ \ |
| + 1, /* Stack Pointer */ \ |
| + 0, /* r12 */ \ |
| + 0, /* r11 */ \ |
| + 0, /* r10 */ \ |
| + 0, /* r9 */ \ |
| + 0, /* r8 */ \ |
| + 0, /* r7 */ \ |
| + 0, /* r6 */ \ |
| + 0, /* r5 */ \ |
| + 0, /* r4 */ \ |
| + 0, /* r3 */ \ |
| + 0, /* r2 */ \ |
| + 0, /* r1 */ \ |
| + 0, /* r0 */ \ |
| + 0, /* f15 */ \ |
| + 0, /* f14 */ \ |
| + 0, /* f13 */ \ |
| + 0, /* f12 */ \ |
| + 0, /* f11 */ \ |
| + 0, /* f10 */ \ |
| + 0, /* f9 */ \ |
| + 0, /* f8 */ \ |
| + 0, /* f7 */ \ |
| + 0, /* f6 */ \ |
| + 0, /* f5 */ \ |
| + 0, /* f4 */ \ |
| + 0, /* f3 */ \ |
| + 0, /* f2*/ \ |
| + 0, /* f1 */ \ |
| + 0 /* f0 */ \ |
| +} |
| + |
| +/* |
| +Like FIXED_REGISTERS but has 1 for each register that is |
| +clobbered (in general) by function calls as well as for fixed |
| +registers. This macro therefore identifies the registers that are not |
| +available for general allocation of values that must live across |
| +function calls. |
| + |
| +If a register has 0 in CALL_USED_REGISTERS, the compiler |
| +automatically saves it on function entry and restores it on function |
| +exit, if the register is used within the function. |
| +*/ |
| +#define CALL_USED_REGISTERS { \ |
| + 1, /* Program Counter */ \ |
| + 0, /* Link Register */ \ |
| + 1, /* Stack Pointer */ \ |
| + 1, /* r12 */ \ |
| + 1, /* r11 */ \ |
| + 1, /* r10 */ \ |
| + 1, /* r9 */ \ |
| + 1, /* r8 */ \ |
| + 0, /* r7 */ \ |
| + 0, /* r6 */ \ |
| + 0, /* r5 */ \ |
| + 0, /* r4 */ \ |
| + 0, /* r3 */ \ |
| + 0, /* r2 */ \ |
| + 0, /* r1 */ \ |
| + 0, /* r0 */ \ |
| + 1, /* f15 */ \ |
| + 1, /* f14 */ \ |
| + 1, /* f13 */ \ |
| + 1, /* f12 */ \ |
| + 1, /* f11 */ \ |
| + 1, /* f10 */ \ |
| + 1, /* f9 */ \ |
| + 1, /* f8 */ \ |
| + 0, /* f7 */ \ |
| + 0, /* f6 */ \ |
| + 0, /* f5 */ \ |
| + 0, /* f4 */ \ |
| + 0, /* f3 */ \ |
| + 0, /* f2*/ \ |
| + 0, /* f1*/ \ |
| + 0, /* f0 */ \ |
| +} |
| + |
| +/* Interrupt functions can only use registers that have already been |
| + saved by the prologue, even if they would normally be |
| + call-clobbered. */ |
| +#define HARD_REGNO_RENAME_OK(SRC, DST) \ |
| + (! IS_INTERRUPT (cfun->machine->func_type) || \ |
| + regs_ever_live[DST]) |
| + |
| + |
| +/* |
| +Zero or more C statements that may conditionally modify five variables |
| +fixed_regs, call_used_regs, global_regs, |
| +reg_names, and reg_class_contents, to take into account |
| +any dependence of these register sets on target flags. The first three |
| +of these are of type char [] (interpreted as Boolean vectors). |
| +global_regs is a const char *[], and |
| +reg_class_contents is a HARD_REG_SET. Before the macro is |
| +called, fixed_regs, call_used_regs, |
| +reg_class_contents, and reg_names have been initialized |
| +from FIXED_REGISTERS, CALL_USED_REGISTERS, |
| +REG_CLASS_CONTENTS, and REGISTER_NAMES, respectively. |
| +global_regs has been cleared, and any -ffixed-[reg], |
| +-fcall-used-[reg] and -fcall-saved-[reg] |
| +command options have been applied. |
| + |
| +You need not define this macro if it has no work to do. |
| + |
| +If the usage of an entire class of registers depends on the target |
| +flags, you may indicate this to GCC by using this macro to modify |
| +fixed_regs and call_used_regs to 1 for each of the |
| +registers in the classes which should not be used by GCC. Also define |
| +the macro REG_CLASS_FROM_LETTER to return NO_REGS if it |
| +is called with a letter for a class that shouldn't be used. |
| + |
| + (However, if this class is not included in GENERAL_REGS and all |
| +of the insn patterns whose constraints permit this class are |
| +controlled by target switches, then GCC will automatically avoid using |
| +these registers when the target switches are opposed to them.) |
| +*/ |
| +#define CONDITIONAL_REGISTER_USAGE \ |
| + do \ |
| + { \ |
| + int regno; \ |
| + \ |
| + if (TARGET_SOFT_FLOAT) \ |
| + { \ |
| + for (regno = FIRST_FP_REGNUM; \ |
| + regno <= LAST_FP_REGNUM; ++regno) \ |
| + fixed_regs[regno] = call_used_regs[regno] = 1; \ |
| + } \ |
| + if (flag_pic) \ |
| + { \ |
| + fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \ |
| + call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \ |
| + } \ |
| + } \ |
| + while (0) |
| + |
| + |
| +/* |
| +If the program counter has a register number, define this as that |
| +register number. Otherwise, do not define it. |
| +*/ |
| + |
| +#define LAST_AVR32_REGNUM 16 |
| + |
| + |
| +/** Order of Allocation of Registers **/ |
| + |
| +/* |
| +If defined, an initializer for a vector of integers, containing the |
| +numbers of hard registers in the order in which GCC should prefer |
| +to use them (from most preferred to least). |
| + |
| +If this macro is not defined, registers are used lowest numbered first |
| +(all else being equal). |
| + |
| +One use of this macro is on machines where the highest numbered |
| +registers must always be saved and the save-multiple-registers |
| +instruction supports only sequences of consecutive registers. On such |
| +machines, define REG_ALLOC_ORDER to be an initializer that lists |
| +the highest numbered allocable register first. |
| +*/ |
| +#define REG_ALLOC_ORDER \ |
| +{ \ |
| + INTERNAL_REGNUM(8), \ |
| + INTERNAL_REGNUM(9), \ |
| + INTERNAL_REGNUM(10), \ |
| + INTERNAL_REGNUM(11), \ |
| + INTERNAL_REGNUM(12), \ |
| + LR_REGNUM, \ |
| + INTERNAL_REGNUM(7), \ |
| + INTERNAL_REGNUM(6), \ |
| + INTERNAL_REGNUM(5), \ |
| + INTERNAL_REGNUM(4), \ |
| + INTERNAL_REGNUM(3), \ |
| + INTERNAL_REGNUM(2), \ |
| + INTERNAL_REGNUM(1), \ |
| + INTERNAL_REGNUM(0), \ |
| + INTERNAL_FP_REGNUM(15), \ |
| + INTERNAL_FP_REGNUM(14), \ |
| + INTERNAL_FP_REGNUM(13), \ |
| + INTERNAL_FP_REGNUM(12), \ |
| + INTERNAL_FP_REGNUM(11), \ |
| + INTERNAL_FP_REGNUM(10), \ |
| + INTERNAL_FP_REGNUM(9), \ |
| + INTERNAL_FP_REGNUM(8), \ |
| + INTERNAL_FP_REGNUM(7), \ |
| + INTERNAL_FP_REGNUM(6), \ |
| + INTERNAL_FP_REGNUM(5), \ |
| + INTERNAL_FP_REGNUM(4), \ |
| + INTERNAL_FP_REGNUM(3), \ |
| + INTERNAL_FP_REGNUM(2), \ |
| + INTERNAL_FP_REGNUM(1), \ |
| + INTERNAL_FP_REGNUM(0), \ |
| + SP_REGNUM, \ |
| + PC_REGNUM \ |
| +} |
| + |
| + |
| +/** How Values Fit in Registers **/ |
| + |
| +/* |
| +A C expression for the number of consecutive hard registers, starting |
| +at register number REGNO, required to hold a value of mode |
| +MODE. |
| + |
| +On a machine where all registers are exactly one word, a suitable |
| +definition of this macro is |
| + |
| +#define HARD_REGNO_NREGS(REGNO, MODE) \ |
| + ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \ |
| + / UNITS_PER_WORD) |
| +*/ |
| +#define HARD_REGNO_NREGS(REGNO, MODE) \ |
| + ((unsigned int)((GET_MODE_SIZE(MODE) + UNITS_PER_WORD -1 ) / UNITS_PER_WORD)) |
| + |
| +/* |
| +A C expression that is nonzero if it is permissible to store a value |
| +of mode MODE in hard register number REGNO (or in several |
| +registers starting with that one). For a machine where all registers |
| +are equivalent, a suitable definition is |
| + |
| + #define HARD_REGNO_MODE_OK(REGNO, MODE) 1 |
| + |
| +You need not include code to check for the numbers of fixed registers, |
| +because the allocation mechanism considers them to be always occupied. |
| + |
| +On some machines, double-precision values must be kept in even/odd |
| +register pairs. You can implement that by defining this macro to reject |
| +odd register numbers for such modes. |
| + |
| +The minimum requirement for a mode to be OK in a register is that the |
| +mov[mode] instruction pattern support moves between the |
| +register and other hard register in the same class and that moving a |
| +value into the register and back out not alter it. |
| + |
| +Since the same instruction used to move word_mode will work for |
| +all narrower integer modes, it is not necessary on any machine for |
| +HARD_REGNO_MODE_OK to distinguish between these modes, provided |
| +you define patterns movhi, etc., to take advantage of this. This |
| +is useful because of the interaction between HARD_REGNO_MODE_OK |
| +and MODES_TIEABLE_P; it is very desirable for all integer modes |
| +to be tieable. |
| + |
| +Many machines have special registers for floating point arithmetic. |
| +Often people assume that floating point machine modes are allowed only |
| +in floating point registers. This is not true. Any registers that |
| +can hold integers can safely hold a floating point machine |
| +mode, whether or not floating arithmetic can be done on it in those |
| +registers. Integer move instructions can be used to move the values. |
| + |
| +On some machines, though, the converse is true: fixed-point machine |
| +modes may not go in floating registers. This is true if the floating |
| +registers normalize any value stored in them, because storing a |
| +non-floating value there would garble it. In this case, |
| +HARD_REGNO_MODE_OK should reject fixed-point machine modes in |
| +floating registers. But if the floating registers do not automatically |
| +normalize, if you can store any bit pattern in one and retrieve it |
| +unchanged without a trap, then any machine mode may go in a floating |
| +register, so you can define this macro to say so. |
| + |
| +The primary significance of special floating registers is rather that |
| +they are the registers acceptable in floating point arithmetic |
| +instructions. However, this is of no concern to |
| +HARD_REGNO_MODE_OK. You handle it by writing the proper |
| +constraints for those instructions. |
| + |
| +On some machines, the floating registers are especially slow to access, |
| +so that it is better to store a value in a stack frame than in such a |
| +register if floating point arithmetic is not being done. As long as the |
| +floating registers are not in class GENERAL_REGS, they will not |
| +be used unless some pattern's constraint asks for one. |
| +*/ |
| +#define HARD_REGNO_MODE_OK(REGNO, MODE) avr32_hard_regno_mode_ok(REGNO, MODE) |
| + |
| +/* |
| +A C expression that is nonzero if a value of mode |
| +MODE1 is accessible in mode MODE2 without copying. |
| + |
| +If HARD_REGNO_MODE_OK(R, MODE1) and |
| +HARD_REGNO_MODE_OK(R, MODE2) are always the same for |
| +any R, then MODES_TIEABLE_P(MODE1, MODE2) |
| +should be nonzero. If they differ for any R, you should define |
| +this macro to return zero unless some other mechanism ensures the |
| +accessibility of the value in a narrower mode. |
| + |
| +You should define this macro to return nonzero in as many cases as |
| +possible since doing so will allow GCC to perform better register |
| +allocation. |
| +*/ |
| +#define MODES_TIEABLE_P(MODE1, MODE2) \ |
| + (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2)) |
| + |
| + |
| + |
| +/****************************************************************************** |
| + * Register Classes |
| + *****************************************************************************/ |
| + |
| +/* |
| +An enumeral type that must be defined with all the register class names |
| +as enumeral values. NO_REGS must be first. ALL_REGS |
| +must be the last register class, followed by one more enumeral value, |
| +LIM_REG_CLASSES, which is not a register class but rather |
| +tells how many classes there are. |
| + |
| +Each register class has a number, which is the value of casting |
| +the class name to type int. The number serves as an index |
| +in many of the tables described below. |
| +*/ |
| +enum reg_class |
| +{ |
| + NO_REGS, |
| + GENERAL_REGS, |
| + FP_REGS, |
| + ALL_REGS, |
| + LIM_REG_CLASSES |
| +}; |
| + |
| +/* |
| +The number of distinct register classes, defined as follows: |
| + #define N_REG_CLASSES (int) LIM_REG_CLASSES |
| +*/ |
| +#define N_REG_CLASSES (int)LIM_REG_CLASSES |
| + |
| +/* |
| +An initializer containing the names of the register classes as C string |
| +constants. These names are used in writing some of the debugging dumps. |
| +*/ |
| +#define REG_CLASS_NAMES \ |
| +{ \ |
| + "NO_REGS", \ |
| + "GENERAL_REGS", \ |
| + "FLOATING_POINT_REGS", \ |
| + "ALL_REGS" \ |
| +} |
| + |
| +/* |
| +An initializer containing the contents of the register classes, as integers |
| +which are bit masks. The nth integer specifies the contents of class |
| +n. The way the integer mask is interpreted is that |
| +register r is in the class if mask & (1 << r) is 1. |
| + |
| +When the machine has more than 32 registers, an integer does not suffice. |
| +Then the integers are replaced by sub-initializers, braced groupings containing |
| +several integers. Each sub-initializer must be suitable as an initializer |
| +for the type HARD_REG_SET which is defined in hard-reg-set.h. |
| +In this situation, the first integer in each sub-initializer corresponds to |
| +registers 0 through 31, the second integer to registers 32 through 63, and |
| +so on. |
| +*/ |
| +#define REG_CLASS_CONTENTS { \ |
| + {0x00000000}, /* NO_REGS */ \ |
| + {0x0000FFFF}, /* GENERAL_REGS */ \ |
| + {0xFFFF0000}, /* FP_REGS */ \ |
| + {0x7FFFFFFF}, /* ALL_REGS */ \ |
| +} |
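| + |
| +/* Illustration of how the masks are read: internal register number r is |
| +   in a class when mask & (1 << r) is nonzero, so for example |
| +   (0x0000FFFF & (1 << 5)) != 0  -- register 5 is in GENERAL_REGS, |
| +   (0xFFFF0000 & (1 << 5)) == 0  -- and not in FP_REGS. */ |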
| + |
| + |
| +/* |
| +A C expression whose value is a register class containing hard register |
| +REGNO. In general there is more than one such class; choose a class |
| +which is minimal, meaning that no smaller class also contains the |
| +register. |
| +*/ |
| +#define REGNO_REG_CLASS(REGNO) (((REGNO) < 16) ? GENERAL_REGS : FP_REGS) |
| + |
| +/* |
| +A macro whose definition is the name of the class to which a valid |
| +base register must belong. A base register is one used in an address |
| +which is the register value plus a displacement. |
| +*/ |
| +#define BASE_REG_CLASS GENERAL_REGS |
| + |
| +/* |
| +This is a variation of the BASE_REG_CLASS macro which allows |
| +the selection of a base register in a mode-dependent manner. If |
| +MODE is VOIDmode then it should return the same value as |
| +BASE_REG_CLASS. |
| +*/ |
| +#define MODE_BASE_REG_CLASS(MODE) BASE_REG_CLASS |
| + |
| +/* |
| +A macro whose definition is the name of the class to which a valid |
| +index register must belong. An index register is one used in an |
| +address where its value is either multiplied by a scale factor or |
| +added to another register (as well as added to a displacement). |
| +*/ |
| +#define INDEX_REG_CLASS BASE_REG_CLASS |
| + |
| +/* |
| +A C expression which defines the machine-dependent operand constraint |
| +letters for register classes. If CHAR is such a letter, the |
| +value should be the register class corresponding to it. Otherwise, |
| +the value should be NO_REGS. The register letter r, |
| +corresponding to class GENERAL_REGS, will not be passed |
| +to this macro; you do not need to handle it. |
| +*/ |
| +#define REG_CLASS_FROM_LETTER(CHAR) ((CHAR) == 'f' ? FP_REGS : NO_REGS) |
| + |
| + |
| +/* These assume that REGNO is a hard or pseudo reg number. |
| + They give nonzero only if REGNO is a hard reg of the suitable class |
| + or a pseudo reg currently allocated to a suitable hard reg. |
| + Since they use reg_renumber, they are safe only once reg_renumber |
| + has been allocated, which happens in local-alloc.c. */ |
| +#define TEST_REGNO(R, TEST, VALUE) \ |
| + ((R TEST VALUE) || ((unsigned) reg_renumber[R] TEST VALUE)) |
| + |
| +/* |
| +A C expression which is nonzero if register number NUM is suitable for use as a base |
| +register in operand addresses. It may be either a suitable hard register or a pseudo |
| +register that has been allocated such a hard register. |
| +*/ |
| +#define REGNO_OK_FOR_BASE_P(NUM) TEST_REGNO(NUM, <=, LAST_REGNUM) |
| + |
| +/* |
| +A C expression which is nonzero if register number NUM is |
| +suitable for use as an index register in operand addresses. It may be |
| +either a suitable hard register or a pseudo register that has been |
| +allocated such a hard register. |
| + |
| +The difference between an index register and a base register is that |
| +the index register may be scaled. If an address involves the sum of |
| +two registers, neither one of them scaled, then either one may be |
| +labeled the ``base'' and the other the ``index''; but whichever |
| +labeling is used must fit the machine's constraints of which registers |
| +may serve in each capacity. The compiler will try both labelings, |
| +looking for one that is valid, and will reload one or both registers |
| +only if neither labeling works. |
| +*/ |
| +#define REGNO_OK_FOR_INDEX_P(NUM) TEST_REGNO(NUM, <=, LAST_REGNUM) |
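| + |
| +/* Behaviour sketch for TEST_REGNO (register numbers are illustrative): |
| +   - for a hard register, say 3, the first comparison already succeeds; |
| +   - for a pseudo register the first comparison fails and the result |
| +     depends on reg_renumber[], i.e. the pseudo only qualifies once it |
| +     has been assigned a hard register <= LAST_REGNUM. */ |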
| + |
| +/* |
| +A C expression that places additional restrictions on the register class |
| +to use when it is necessary to copy value X into a register in class |
| +CLASS. The value is a register class; perhaps CLASS, or perhaps |
| +another, smaller class. On many machines, the following definition is |
| +safe: #define PREFERRED_RELOAD_CLASS(X,CLASS) CLASS |
| + |
| +Sometimes returning a more restrictive class makes better code. For |
| +example, on the 68000, when X is an integer constant that is in range |
| +for a 'moveq' instruction, the value of this macro is always |
| +DATA_REGS as long as CLASS includes the data registers. |
| +Requiring a data register guarantees that a 'moveq' will be used. |
| + |
| +If X is a const_double, by returning NO_REGS |
| +you can force X into a memory constant. This is useful on |
| +certain machines where immediate floating values cannot be loaded into |
| +certain kinds of registers. |
| +*/ |
| +#define PREFERRED_RELOAD_CLASS(X, CLASS) CLASS |
| + |
| + |
| + |
| +/* |
| +A C expression for the maximum number of consecutive registers |
| +of class CLASS needed to hold a value of mode MODE. |
| + |
| +This is closely related to the macro HARD_REGNO_NREGS. In fact, |
| +the value of the macro CLASS_MAX_NREGS(CLASS, MODE) |
| +should be the maximum value of HARD_REGNO_NREGS(REGNO, MODE) |
| +for all REGNO values in the class CLASS. |
| + |
| +This macro helps control the handling of multiple-word values |
| +in the reload pass. |
| +*/ |
| +#define CLASS_MAX_NREGS(CLASS, MODE) /* ToDo:fixme */ \ |
| + (unsigned int)((GET_MODE_SIZE(MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD) |
| + |
| + |
| +/* |
| +  Using CONST_OK_FOR_CONSTRAINT_P instead of CONST_OK_FOR_LETTER_P |
| +  in order to support constraints with more than one letter. |
| +  Only two letters are then used for constant constraints, |
| +  the letter 'K' and the letter 'I'. A constraint starting with |
| +  these letters must consist of four characters. The character following |
| +  'K' or 'I' must be either 'u' (unsigned) or 's' (signed) to specify |
| +  whether the constant is zero or sign extended. The last two characters |
| +  specify the length in bits of the constant. The base constraint letter 'I' means |
| +  that this is a negated constant, meaning that actually -VAL should be |
| +  checked to lie within the valid range instead of VAL, which is used when |
| + 'K' is the base constraint letter. |
| + |
| +*/ |
| + |
| +#define CONSTRAINT_LEN(C, STR) \ |
| + ( ((C) == 'K' || (C) == 'I') ? 4 : \ |
| + ((C) == 'R') ? 5 : \ |
| + ((C) == 'P') ? -1 : \ |
| + DEFAULT_CONSTRAINT_LEN((C), (STR)) ) |
| + |
| +#define CONST_OK_FOR_CONSTRAINT_P(VALUE, C, STR) \ |
| + avr32_const_ok_for_constraint_p(VALUE, C, STR) |
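| + |
| +/* Examples of constraints built from this scheme (illustrative only; |
| +   the full set is handled in avr32_const_ok_for_constraint_p): |
| +   "Ks16" - constant must fit in a signed 16-bit immediate, |
| +   "Ku08" - constant must fit in an unsigned 8-bit immediate, |
| +   "Is21" - the negated constant must fit in a signed 21-bit immediate. |
| +   Each is four characters long, matching CONSTRAINT_LEN above. */ |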
| + |
| +/* |
| +A C expression that defines the machine-dependent operand constraint |
| +letters that specify particular ranges of const_double values ('G' or 'H'). |
| + |
| +If C is one of those letters, the expression should check that |
| +VALUE, an RTX of code const_double, is in the appropriate |
| +range and return 1 if so, 0 otherwise. If C is not one of those |
| +letters, the value should be 0 regardless of VALUE. |
| + |
| +const_double is used for all floating-point constants and for |
| +DImode fixed-point constants. A given letter can accept either |
| +or both kinds of values. It can use GET_MODE to distinguish |
| +between these kinds. |
| +*/ |
| +#define CONST_DOUBLE_OK_FOR_LETTER_P(OP, C) \ |
| + ((C) == 'G' ? avr32_const_double_immediate(OP) : 0) |
| + |
| +/* |
| +A C expression that defines the optional machine-dependent constraint |
| +letters that can be used to segregate specific types of operands, usually |
| +memory references, for the target machine. Any letter that is not |
| +elsewhere defined and not matched by REG_CLASS_FROM_LETTER |
| +may be used. Normally this macro will not be defined. |
| + |
| +If it is required for a particular target machine, it should return 1 |
| +if VALUE corresponds to the operand type represented by the |
| +constraint letter C. If C is not defined as an extra |
| +constraint, the value returned should be 0 regardless of VALUE. |
| + |
| +For example, on the ROMP, load instructions cannot have their output |
| +in r0 if the memory reference contains a symbolic address. Constraint |
| +letter 'Q' is defined as representing a memory address that does |
| +not contain a symbolic address. An alternative is specified with |
| +a 'Q' constraint on the input and 'r' on the output. The next |
| +alternative specifies 'm' on the input and a register class that |
| +does not include r0 on the output. |
| +*/ |
| +#define EXTRA_CONSTRAINT_STR(OP, C, STR) \ |
| + ((C) == 'W' ? avr32_address_operand(OP, GET_MODE(OP)) : \ |
| + (C) == 'R' ? (avr32_indirect_register_operand(OP, GET_MODE(OP)) || \ |
| + (avr32_imm_disp_memory_operand(OP, GET_MODE(OP)) \ |
| + && avr32_const_ok_for_constraint_p( \ |
| + INTVAL(XEXP(XEXP(OP, 0), 1)), \ |
| + (STR)[1], &(STR)[1]))) : \ |
| + (C) == 'S' ? avr32_indexed_memory_operand(OP, GET_MODE(OP)) : \ |
| + (C) == 'T' ? avr32_const_pool_ref_operand(OP, GET_MODE(OP)) : \ |
| + (C) == 'U' ? SYMBOL_REF_RCALL_FUNCTION_P(OP) : \ |
| + (C) == 'Z' ? avr32_cop_memory_operand(OP, GET_MODE(OP)) : \ |
| + (C) == 'Q' ? avr32_non_rmw_memory_operand(OP, GET_MODE(OP)) : \ |
| + (C) == 'Y' ? avr32_rmw_memory_operand(OP, GET_MODE(OP)) : \ |
| + 0) |
| + |
| + |
| +#define EXTRA_MEMORY_CONSTRAINT(C, STR) ( ((C) == 'R') || \ |
| + ((C) == 'Q') || \ |
| + ((C) == 'S') || \ |
| + ((C) == 'Y') || \ |
| + ((C) == 'Z') ) |
| + |
| + |
| +/* Returns nonzero if op is a function SYMBOL_REF which |
| + can be called using an rcall instruction */ |
| +#define SYMBOL_REF_RCALL_FUNCTION_P(op) \ |
| + ( GET_CODE(op) == SYMBOL_REF \ |
| + && SYMBOL_REF_FUNCTION_P(op) \ |
| + && SYMBOL_REF_LOCAL_P(op) \ |
| + && !SYMBOL_REF_EXTERNAL_P(op) \ |
| + && !TARGET_HAS_ASM_ADDR_PSEUDOS ) |
| + |
| +/****************************************************************************** |
| + * Stack Layout and Calling Conventions |
| + *****************************************************************************/ |
| + |
| +/** Basic Stack Layout **/ |
| + |
| +/* |
| +Define this macro if pushing a word onto the stack moves the stack |
| +pointer to a smaller address. |
| + |
| +When we say, ``define this macro if ...,'' it means that the |
| +compiler checks this macro only with #ifdef so the precise |
| +definition used does not matter. |
| +*/ |
| +/* pushm decrements SP: *(--SP) <-- Rx */ |
| +#define STACK_GROWS_DOWNWARD |
| + |
| +/* |
| +This macro defines the operation used when something is pushed |
| +on the stack. In RTL, a push operation will be |
| +(set (mem (STACK_PUSH_CODE (reg sp))) ...) |
| + |
| +The choices are PRE_DEC, POST_DEC, PRE_INC, |
| +and POST_INC. Which of these is correct depends on |
| +the stack direction and on whether the stack pointer points |
| +to the last item on the stack or whether it points to the |
| +space for the next item on the stack. |
| + |
| +The default is PRE_DEC when STACK_GROWS_DOWNWARD is |
| +defined, which is almost always right, and PRE_INC otherwise, |
| +which is often wrong. |
| +*/ |
| +/* pushm: *(--SP) <-- Rx */ |
| +#define STACK_PUSH_CODE PRE_DEC |
| + |
| +/* Define this to nonzero if the nominal address of the stack frame |
| + is at the high-address end of the local variables; |
| + that is, each additional local variable allocated |
| + goes at a more negative offset in the frame. */ |
| +#define FRAME_GROWS_DOWNWARD 1 |
| + |
| + |
| +/* |
| +Offset from the frame pointer to the first local variable slot to be allocated. |
| + |
| +If FRAME_GROWS_DOWNWARD, find the next slot's offset by |
| +subtracting the first slot's length from STARTING_FRAME_OFFSET. |
| +Otherwise, it is found by adding the length of the first slot to the |
| +value STARTING_FRAME_OFFSET. |
| +*/ |
| +#define STARTING_FRAME_OFFSET 0 |
| + |
| +/* |
| +Offset from the stack pointer register to the first location at which |
| +outgoing arguments are placed. If not specified, the default value of |
| +zero is used. This is the proper value for most machines. |
| + |
| +If ARGS_GROW_DOWNWARD, this is the offset to the location above |
| +the first location at which outgoing arguments are placed. |
| +*/ |
| +#define STACK_POINTER_OFFSET 0 |
| + |
| +/* |
| +Offset from the argument pointer register to the first argument's |
| +address. On some machines it may depend on the data type of the |
| +function. |
| + |
| +If ARGS_GROW_DOWNWARD, this is the offset to the location above |
| +the first argument's address. |
| +*/ |
| +#define FIRST_PARM_OFFSET(FUNDECL) 0 |
| + |
| + |
| +/* |
| +A C expression whose value is RTL representing the address in a stack |
| +frame where the pointer to the caller's frame is stored. Assume that |
| +FRAMEADDR is an RTL expression for the address of the stack frame |
| +itself. |
| + |
| +If you don't define this macro, the default is to return the value |
| +of FRAMEADDR - that is, the stack frame address is also the |
| +address of the stack word that points to the previous frame. |
| +*/ |
| +#define DYNAMIC_CHAIN_ADDRESS(FRAMEADDR) plus_constant ((FRAMEADDR), 4) |
| + |
| + |
| +/* |
| +A C expression whose value is RTL representing the value of the return |
| +address for the frame COUNT steps up from the current frame, after |
| +the prologue. FRAMEADDR is the frame pointer of the COUNT |
| +frame, or the frame pointer of the COUNT - 1 frame if |
| +RETURN_ADDR_IN_PREVIOUS_FRAME is defined. |
| + |
| +The value of the expression must always be the correct address when |
| +COUNT is zero, but may be NULL_RTX if there is no way to |
| +determine the return address of other frames. |
| +*/ |
| +#define RETURN_ADDR_RTX(COUNT, FRAMEADDR) avr32_return_addr(COUNT, FRAMEADDR) |
| + |
| + |
| +/* |
| +A C expression whose value is RTL representing the location of the |
| +incoming return address at the beginning of any function, before the |
| +prologue. This RTL is either a REG, indicating that the return |
| +value is saved in 'REG', or a MEM representing a location in |
| +the stack. |
| + |
| +You only need to define this macro if you want to support call frame |
| +debugging information like that provided by DWARF 2. |
| + |
| +If this RTL is a REG, you should also define |
| +DWARF_FRAME_RETURN_COLUMN to DWARF_FRAME_REGNUM (REGNO). |
| +*/ |
| +#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM) |
| + |
| + |
| + |
| +/* |
| +A C expression whose value is an integer giving the offset, in bytes, |
| +from the value of the stack pointer register to the top of the stack |
| +frame at the beginning of any function, before the prologue. The top of |
| +the frame is defined to be the value of the stack pointer in the |
| +previous frame, just before the call instruction. |
| + |
| +You only need to define this macro if you want to support call frame |
| +debugging information like that provided by DWARF 2. |
| +*/ |
| +#define INCOMING_FRAME_SP_OFFSET 0 |
| + |
| + |
| +/** Exception Handling Support **/ |
| + |
| +/* Use setjmp/longjmp for exception handling. */ |
| +#define DWARF2_UNWIND_INFO 0 |
| +#define MUST_USE_SJLJ_EXCEPTIONS 1 |
| + |
| +/* |
| +A C expression whose value is the Nth register number used for |
| +data by exception handlers, or INVALID_REGNUM if fewer than |
| +N registers are usable. |
| + |
| +The exception handling library routines communicate with the exception |
| +handlers via a set of agreed upon registers. Ideally these registers |
| +should be call-clobbered; it is possible to use call-saved registers, |
| +but may negatively impact code size. The target must support at least |
| +2 data registers, but should define 4 if there are enough free registers. |
| + |
| +You must define this macro if you want to support call frame exception |
| +handling like that provided by DWARF 2. |
| +*/ |
| +/* |
| + Use r9-r11 |
| +*/ |
| +#define EH_RETURN_DATA_REGNO(N) \ |
| + ((N<3) ? INTERNAL_REGNUM(N+9) : INVALID_REGNUM) |
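| + |
| +/* Spelled out (assuming INTERNAL_REGNUM(n) denotes core register rn): |
| +   EH_RETURN_DATA_REGNO(0) -> r9 |
| +   EH_RETURN_DATA_REGNO(1) -> r10 |
| +   EH_RETURN_DATA_REGNO(2) -> r11 |
| +   EH_RETURN_DATA_REGNO(3) -> INVALID_REGNUM (only three data regs). */ |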
| + |
| +/* |
| +A C expression whose value is RTL representing a location in which |
| +to store a stack adjustment to be applied before function return. |
| +This is used to unwind the stack to an exception handler's call frame. |
| +It will be assigned zero on code paths that return normally. |
| + |
| +Typically this is a call-clobbered hard register that is otherwise |
| +untouched by the epilogue, but could also be a stack slot. |
| + |
| +You must define this macro if you want to support call frame exception |
| +handling like that provided by DWARF 2. |
| +*/ |
| +/* |
| + Use r8 |
| +*/ |
| +#define EH_RETURN_STACKADJ_REGNO INTERNAL_REGNUM(8) |
| +#define EH_RETURN_STACKADJ_RTX gen_rtx_REG(SImode, EH_RETURN_STACKADJ_REGNO) |
| + |
| +/* |
| +A C expression whose value is RTL representing a location in which |
| +to store the address of an exception handler to which we should |
| +return. It will not be assigned on code paths that return normally. |
| + |
| +Typically this is the location in the call frame at which the normal |
| +return address is stored. For targets that return by popping an |
| +address off the stack, this might be a memory address just below |
| +the target call frame rather than inside the current call |
| +frame. EH_RETURN_STACKADJ_RTX will have already been assigned, |
| +so it may be used to calculate the location of the target call frame. |
| + |
| +Some targets have more complex requirements than storing to an |
| +address calculable during initial code generation. In that case |
| +the eh_return instruction pattern should be used instead. |
| + |
| +If you want to support call frame exception handling, you must |
| +define either this macro or the eh_return instruction pattern. |
| +*/ |
| +/* |
| + We define the eh_return instruction pattern, so this isn't needed. |
| +*/ |
| +/* #define EH_RETURN_HANDLER_RTX gen_rtx_REG(Pmode, RET_REGISTER) */ |
| + |
| +/* |
| + This macro chooses the encoding of pointers embedded in the |
| + exception handling sections. If at all possible, this should be |
| + defined such that the exception handling section will not require |
| + dynamic relocations, and so may be read-only. |
| + |
| + code is 0 for data, 1 for code labels, 2 for function |
| + pointers. global is true if the symbol may be affected by dynamic |
| + relocations. The macro should return a combination of the DW_EH_PE_* |
| + defines as found in dwarf2.h. |
| + |
| + If this macro is not defined, pointers will not be encoded but |
| + represented directly. |
| +*/ |
| +#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \ |
| + ((flag_pic && (GLOBAL) ? DW_EH_PE_indirect : 0) \ |
| + | (flag_pic ? DW_EH_PE_pcrel : DW_EH_PE_absptr) \ |
| + | DW_EH_PE_sdata4) |
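| + |
| +/* Resulting encodings (sketch): |
| +   non-PIC:             DW_EH_PE_absptr | DW_EH_PE_sdata4 |
| +   PIC, local symbol:   DW_EH_PE_pcrel | DW_EH_PE_sdata4 |
| +   PIC, global symbol:  DW_EH_PE_indirect | DW_EH_PE_pcrel | DW_EH_PE_sdata4 |
| +   so PIC unwind tables can be kept free of dynamic relocations. */ |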
| + |
| +/* ToDo: The rest of this subsection */ |
| + |
| +/** Specifying How Stack Checking is Done **/ |
| +/* ToDo: All in this subsection */ |
| + |
| +/** Registers That Address the Stack Frame **/ |
| + |
| +/* |
| +The register number of the stack pointer register, which must also be a |
| +fixed register according to FIXED_REGISTERS. On most machines, |
| +the hardware determines which register this is. |
| +*/ |
| +/* Using r13 as stack pointer. */ |
| +#define STACK_POINTER_REGNUM INTERNAL_REGNUM(13) |
| + |
| +/* |
| +The register number of the frame pointer register, which is used to |
| +access automatic variables in the stack frame. On some machines, the |
| +hardware determines which register this is. On other machines, you can |
| +choose any register you wish for this purpose. |
| +*/ |
| +/* Use r7 */ |
| +#define FRAME_POINTER_REGNUM INTERNAL_REGNUM(7) |
| + |
| + |
| + |
| +/* |
| +The register number of the arg pointer register, which is used to access |
| +the function's argument list. On some machines, this is the same as the |
| +frame pointer register. On some machines, the hardware determines which |
| +register this is. On other machines, you can choose any register you |
| +wish for this purpose. If this is not the same register as the frame |
| +pointer register, then you must mark it as a fixed register according to |
| +FIXED_REGISTERS, or arrange to be able to eliminate it (see Section |
| +10.10.5 [Elimination], page 224). |
| +*/ |
| +/* Using r4 */ |
| +#define ARG_POINTER_REGNUM INTERNAL_REGNUM(4) |
| + |
| + |
| +/* |
| +Register numbers used for passing a function's static chain pointer. If |
| +register windows are used, the register number as seen by the called |
| +function is STATIC_CHAIN_INCOMING_REGNUM, while the register |
| +number as seen by the calling function is STATIC_CHAIN_REGNUM. If |
| +these registers are the same, STATIC_CHAIN_INCOMING_REGNUM need |
| +not be defined. |
| + |
| +The static chain register need not be a fixed register. |
| + |
| +If the static chain is passed in memory, these macros should not be |
| +defined; instead, the next two macros should be defined. |
| +*/ |
| +/* Using r0 */ |
| +#define STATIC_CHAIN_REGNUM INTERNAL_REGNUM(0) |
| + |
| + |
| +/** Eliminating Frame Pointer and Arg Pointer **/ |
| + |
| +/* |
| +A C expression which is nonzero if a function must have and use a frame |
| +pointer. This expression is evaluated in the reload pass. If its value is |
| +nonzero the function will have a frame pointer. |
| + |
| +The expression can in principle examine the current function and decide |
| +according to the facts, but on most machines the constant 0 or the |
| +constant 1 suffices. Use 0 when the machine allows code to be generated |
| +with no frame pointer, and doing so saves some time or space. Use 1 |
| +when there is no possible advantage to avoiding a frame pointer. |
| + |
| +In certain cases, the compiler does not know how to produce valid code |
| +without a frame pointer. The compiler recognizes those cases and |
| +automatically gives the function a frame pointer regardless of what |
| +FRAME_POINTER_REQUIRED says. You don't need to worry about |
| +them. |
| + |
| +In a function that does not require a frame pointer, the frame pointer |
| +register can be allocated for ordinary usage, unless you mark it as a |
| +fixed register. See FIXED_REGISTERS for more information. |
| +*/ |
| +/* We need the frame pointer when compiling for profiling */ |
| +#define FRAME_POINTER_REQUIRED (current_function_profile) |
| + |
| +/* |
| +A C statement to store in the variable DEPTH_VAR the difference |
| +between the frame pointer and the stack pointer values immediately after |
| +the function prologue. The value would be computed from information |
| +such as the result of get_frame_size () and the tables of |
| +registers regs_ever_live and call_used_regs. |
| + |
| +If ELIMINABLE_REGS is defined, this macro will be not be used and |
| +need not be defined. Otherwise, it must be defined even if |
| +FRAME_POINTER_REQUIRED is defined to always be true; in that |
| +case, you may set DEPTH_VAR to anything. |
| +*/ |
| +#define INITIAL_FRAME_POINTER_OFFSET(DEPTH_VAR) ((DEPTH_VAR) = get_frame_size()) |
| + |
| +/* |
| +If defined, this macro specifies a table of register pairs used to |
| +eliminate unneeded registers that point into the stack frame. If it is not |
| +defined, the only elimination attempted by the compiler is to replace |
| +references to the frame pointer with references to the stack pointer. |
| + |
| +The definition of this macro is a list of structure initializations, each |
| +of which specifies an original and replacement register. |
| + |
| +On some machines, the position of the argument pointer is not known until |
| +the compilation is completed. In such a case, a separate hard register |
| +must be used for the argument pointer. This register can be eliminated by |
| +replacing it with either the frame pointer or the argument pointer, |
| +depending on whether or not the frame pointer has been eliminated. |
| + |
| +In this case, you might specify: |
| + #define ELIMINABLE_REGS \ |
| + {{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ |
| + {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \ |
| + {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}} |
| + |
| +Note that the elimination of the argument pointer with the stack pointer is |
| +specified first since that is the preferred elimination. |
| +*/ |
| +#define ELIMINABLE_REGS \ |
| +{ \ |
| + { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \ |
| + { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \ |
| + { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM } \ |
| +} |
| + |
| +/* |
| +A C expression that returns nonzero if the compiler is allowed to try |
| +to replace register number FROM with register number |
| +TO. This macro need only be defined if ELIMINABLE_REGS |
| +is defined, and will usually be the constant 1, since most of the cases |
| +preventing register elimination are things that the compiler already |
| +knows about. |
| +*/ |
| +#define CAN_ELIMINATE(FROM, TO) 1 |
| + |
| +/* |
| +This macro is similar to INITIAL_FRAME_POINTER_OFFSET. It |
| +specifies the initial difference between the specified pair of |
| +registers. This macro must be defined if ELIMINABLE_REGS is |
| +defined. |
| +*/ |
| +#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \ |
| + ((OFFSET) = avr32_initial_elimination_offset(FROM, TO)) |
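| + |
| +/* Usage sketch: when reload eliminates the argument pointer in favour of |
| +   the stack pointer it evaluates, in effect, |
| + |
| +     HOST_WIDE_INT offset; |
| +     INITIAL_ELIMINATION_OFFSET (ARG_POINTER_REGNUM, |
| +                                 STACK_POINTER_REGNUM, offset); |
| + |
| +   and rewrites (plus ap X) into (plus sp (X + offset)); the actual |
| +   offset computation is done by avr32_initial_elimination_offset(). */ |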
| + |
| +/** Passing Function Arguments on the Stack **/ |
| + |
| + |
| +/* |
| +A C expression. If nonzero, push insns will be used to pass |
| +outgoing arguments. |
| +If the target machine does not have a push instruction, set it to zero. |
| +That directs GCC to use an alternate strategy: to |
| +allocate the entire argument block and then store the arguments into |
| +it. When PUSH_ARGS is nonzero, PUSH_ROUNDING must be defined too. |
| +*/ |
| +#define PUSH_ARGS 1 |
| + |
| + |
| +/* |
| +A C expression that is the number of bytes actually pushed onto the |
| +stack when an instruction attempts to push NPUSHED bytes. |
| + |
| +On some machines, the definition |
| + |
| + #define PUSH_ROUNDING(BYTES) (BYTES) |
| + |
| +will suffice. But on other machines, instructions that appear |
| +to push one byte actually push two bytes in an attempt to maintain |
| +alignment. Then the definition should be |
| + |
| + #define PUSH_ROUNDING(BYTES) (((BYTES) + 1) & ~1) |
| +*/ |
| +/* Push 4 bytes at a time. */ |
| +#define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3) |
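| + |
| +/* Worked example: arguments are pushed in whole 4-byte words, so |
| +   PUSH_ROUNDING(1) == 4, PUSH_ROUNDING(4) == 4, PUSH_ROUNDING(5) == 8. */ |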
| + |
| +/* |
| +A C expression. If nonzero, the maximum amount of space required for |
| +outgoing arguments will be computed and placed into the variable |
| +current_function_outgoing_args_size. No space will be pushed |
| +onto the stack for each call; instead, the function prologue should |
| +increase the stack frame size by this amount. |
| + |
| +Setting both PUSH_ARGS and ACCUMULATE_OUTGOING_ARGS is not proper. |
| +*/ |
| +#define ACCUMULATE_OUTGOING_ARGS 0 |
| + |
| + |
| + |
| + |
| +/* |
| +A C expression that should indicate the number of bytes of its own |
| +arguments that a function pops on returning, or 0 if the |
| +function pops no arguments and the caller must therefore pop them all |
| +after the function returns. |
| + |
| +FUNDECL is a C variable whose value is a tree node that describes |
| +the function in question. Normally it is a node of type |
| +FUNCTION_DECL that describes the declaration of the function. |
| +From this you can obtain the DECL_ATTRIBUTES of the function. |
| + |
| +FUNTYPE is a C variable whose value is a tree node that |
| +describes the function in question. Normally it is a node of type |
| +FUNCTION_TYPE that describes the data type of the function. |
| +From this it is possible to obtain the data types of the value and |
| +arguments (if known). |
| + |
| +When a call to a library function is being considered, FUNDECL |
| +will contain an identifier node for the library function. Thus, if |
| +you need to distinguish among various library functions, you can do so |
| +by their names. Note that ``library function'' in this context means |
| +a function used to perform arithmetic, whose name is known specially |
| +in the compiler and was not mentioned in the C code being compiled. |
| + |
| +STACK_SIZE is the number of bytes of arguments passed on the |
| +stack. If a variable number of bytes is passed, it is zero, and |
| +argument popping will always be the responsibility of the calling function. |
| + |
| +On the VAX, all functions always pop their arguments, so the definition |
| +of this macro is STACK_SIZE. On the 68000, using the standard |
| +calling convention, no functions pop their arguments, so the value of |
| +the macro is always 0 in this case. But an alternative calling |
| +convention is available in which functions that take a fixed number of |
| +arguments pop them but other functions (such as printf) pop |
| +nothing (the caller pops all). When this convention is in use, |
| +FUNTYPE is examined to determine whether a function takes a fixed |
| +number of arguments. |
| +*/ |
| +#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, STACK_SIZE) 0 |
| + |
| + |
| +/* Return nonzero if we can use a single return instruction for this function. */ |
| +#define USE_RETURN_INSN(ISCOND) avr32_use_return_insn(ISCOND) |
| + |
| +/* |
| +A C expression that should indicate the number of bytes a call sequence |
| +pops off the stack. It is added to the value of RETURN_POPS_ARGS |
| +when compiling a function call. |
| + |
| +CUM is the variable in which all arguments to the called function |
| +have been accumulated. |
| + |
| +On certain architectures, such as the SH5, a call trampoline is used |
| +that pops certain registers off the stack, depending on the arguments |
| +that have been passed to the function. Since this is a property of the |
| +call site, not of the called function, RETURN_POPS_ARGS is not |
| +appropriate. |
| +*/ |
| +#define CALL_POPS_ARGS(CUM) 0 |
| + |
| +/* Passing Arguments in Registers */ |
| + |
| +/* |
| +A C expression that controls whether a function argument is passed |
| +in a register, and which register. |
| + |
| +The arguments are CUM, which summarizes all the previous |
| +arguments; MODE, the machine mode of the argument; TYPE, |
| +the data type of the argument as a tree node or 0 if that is not known |
| +(which happens for C support library functions); and NAMED, |
| +which is 1 for an ordinary argument and 0 for nameless arguments that |
| +correspond to '...' in the called function's prototype. |
| +TYPE can be an incomplete type if a syntax error has previously |
| +occurred. |
| + |
| +The value of the expression is usually either a reg RTX for the |
| +hard register in which to pass the argument, or zero to pass the |
| +argument on the stack. |
| + |
| +For machines like the VAX and 68000, where normally all arguments are |
| +pushed, zero suffices as a definition. |
| + |
| +The value of the expression can also be a parallel RTX. This is |
| +used when an argument is passed in multiple locations. The mode of the |
| +parallel should be the mode of the entire argument. The |
| +parallel holds any number of expr_list pairs; each one |
| +describes where part of the argument is passed. In each |
| +expr_list the first operand must be a reg RTX for the hard |
| +register in which to pass this part of the argument, and the mode of the |
| +register RTX indicates how large this part of the argument is. The |
| +second operand of the expr_list is a const_int which gives |
| +the offset in bytes into the entire argument of where this part starts. |
| +As a special exception the first expr_list in the parallel |
| +RTX may have a first operand of zero. This indicates that the entire |
| +argument is also stored on the stack. |
| + |
| +The last time this macro is called, it is called with MODE == VOIDmode, |
| +and its result is passed to the call or call_value |
| +pattern as operands 2 and 3 respectively. |
| + |
| +The usual way to make the ISO library 'stdarg.h' work on a machine |
| +where some arguments are usually passed in registers, is to cause |
| +nameless arguments to be passed on the stack instead. This is done |
| +by making FUNCTION_ARG return 0 whenever NAMED is 0. |
| + |
| +You may use the macro MUST_PASS_IN_STACK (MODE, TYPE) |
| +in the definition of this macro to determine if this argument is of a |
| +type that must be passed in the stack. If REG_PARM_STACK_SPACE |
| +is not defined and FUNCTION_ARG returns nonzero for such an |
| +argument, the compiler will abort. If REG_PARM_STACK_SPACE is |
| +defined, the argument will be computed in the stack and then loaded into |
| +a register. */ |
| + |
| +#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \ |
| + avr32_function_arg(&(CUM), MODE, TYPE, NAMED) |
| + |
| + |
| + |
| + |
| +/* |
| +A C type for declaring a variable that is used as the first argument of |
| +FUNCTION_ARG and other related values. For some target machines, |
| +the type int suffices and can hold the number of bytes of |
| +argument so far. |
| + |
| +There is no need to record in CUMULATIVE_ARGS anything about the |
| +arguments that have been passed on the stack. The compiler has other |
| +variables to keep track of that. For target machines on which all |
| +arguments are passed on the stack, there is no need to store anything in |
| +CUMULATIVE_ARGS; however, the data structure must exist and |
| +should not be empty, so use int. |
| +*/ |
| +typedef struct avr32_args |
| +{ |
| + /* Index representing the argument register the current function argument |
| + will occupy */ |
| + int index; |
| + /* A mask with bits representing the argument registers: if a bit is set |
| +     then this register is used for an argument */ |
| + int used_index; |
| + /* TRUE if this function has anonymous arguments */ |
| + int uses_anonymous_args; |
| + /* The size in bytes of the named arguments pushed on the stack */ |
| + int stack_pushed_args_size; |
| + /* Set to true if this function needs a Return Value Pointer */ |
| + int use_rvp; |
| + |
| +} CUMULATIVE_ARGS; |
| + |
| + |
| +#define FIRST_CUM_REG_INDEX 0 |
| +#define LAST_CUM_REG_INDEX 4 |
| +#define GET_REG_INDEX(CUM) ((CUM)->index) |
| +#define SET_REG_INDEX(CUM, INDEX) ((CUM)->index = (INDEX)) |
| +#define GET_USED_INDEX(CUM, INDEX) ((CUM)->used_index & (1 << (INDEX))) |
| +#define SET_USED_INDEX(CUM, INDEX) \ |
| + do \ |
| + { \ |
| + if (INDEX >= 0) \ |
| + (CUM)->used_index |= (1 << (INDEX)); \ |
| + } \ |
| + while (0) |
| +#define SET_INDEXES_UNUSED(CUM) ((CUM)->used_index = 0) |
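| + |
| +/* Usage sketch for the accessors above (hypothetical values, only to |
| +   show how the bookkeeping fits together): |
| + |
| +     CUMULATIVE_ARGS cum; |
| +     SET_INDEXES_UNUSED (&cum);                    // no register taken yet |
| +     SET_REG_INDEX (&cum, 0);                      // next argument uses index 0 |
| +     SET_USED_INDEX (&cum, GET_REG_INDEX (&cum));  // mark index 0 as taken |
| +     // GET_USED_INDEX (&cum, 0) is now nonzero. */ |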
| + |
| + |
| +/* |
| + A C statement (sans semicolon) for initializing the variable CUM for the |
| + state at the beginning of the argument list. The variable has type |
| + CUMULATIVE_ARGS. The value of FNTYPE is the tree node for the data type of |
| + the function which will receive the args, or 0 if the args are to a compiler |
| + support library function. For direct calls that are not libcalls, FNDECL |
| + contains the declaration node of the function. FNDECL is also set when |
| + INIT_CUMULATIVE_ARGS is used to find arguments for the function being |
| + compiled. N_NAMED_ARGS is set to the number of named arguments, including a |
| + structure return address if it is passed as a parameter, when making a call. |
| + When processing incoming arguments, N_NAMED_ARGS is set to -1. |
| + |
| + When processing a call to a compiler support library function, LIBNAME |
| + identifies which one. It is a symbol_ref rtx which contains the name of the |
| + function, as a string. LIBNAME is 0 when an ordinary C function call is |
| + being processed. Thus, each time this macro is called, either LIBNAME or |
| + FNTYPE is nonzero, but never both of them at once. |
| +*/ |
| +#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \ |
| + avr32_init_cumulative_args(&(CUM), FNTYPE, LIBNAME, FNDECL) |
| + |
| + |
| +/* |
| +A C statement (sans semicolon) to update the summarizer variable |
| +CUM to advance past an argument in the argument list. The |
| +values MODE, TYPE and NAMED describe that argument. |
| +Once this is done, the variable CUM is suitable for analyzing |
| +the following argument with FUNCTION_ARG, etc. |
| + |
| +This macro need not do anything if the argument in question was passed |
| +on the stack. The compiler knows how to track the amount of stack space |
| +used for arguments without any special help. |
| +*/ |
| +#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \ |
| + avr32_function_arg_advance(&(CUM), MODE, TYPE, NAMED) |
| + |
| +/* |
| +If defined, a C expression which determines whether, and in which direction, |
| +to pad out an argument with extra space. The value should be of type |
| +enum direction: either 'upward' to pad above the argument, |
| +'downward' to pad below, or 'none' to inhibit padding. |
| + |
| +The amount of padding is always just enough to reach the next |
| +multiple of FUNCTION_ARG_BOUNDARY; this macro does not control |
| +it. |
| + |
| +This macro has a default definition which is right for most systems. |
| +For little-endian machines, the default is to pad upward. For |
| +big-endian machines, the default is to pad downward for an argument of |
| +constant size shorter than an int, and upward otherwise. |
| +*/ |
| +#define FUNCTION_ARG_PADDING(MODE, TYPE) \ |
| + avr32_function_arg_padding(MODE, TYPE) |
| + |
| +/* |
| + Specify padding for the last element of a block move between registers |
| + and memory. FIRST is nonzero if this is the only element. Defining |
| + this macro allows better control of register function parameters on |
| + big-endian machines, without using PARALLEL rtl. In particular, |
| + MUST_PASS_IN_STACK need not test padding and mode of types in registers, |
| + as there is no longer a "wrong" part of a register. For example, a three |
| + byte aggregate may be passed in the high part of a register if so required. |
| +*/ |
| +#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \ |
| + avr32_function_arg_padding(MODE, TYPE) |
| + |
| +/* |
| +If defined, a C expression which determines whether the default |
| +implementation of va_arg will attempt to pad down before reading the |
| +next argument, if that argument is smaller than its aligned space as |
| +controlled by PARM_BOUNDARY. If this macro is not defined, all such |
| +arguments are padded down if BYTES_BIG_ENDIAN is true. |
| +*/ |
| +#define PAD_VARARGS_DOWN \ |
| + (FUNCTION_ARG_PADDING (TYPE_MODE (type), type) == downward) |
| + |
| + |
| +/* |
| +A C expression that is nonzero if REGNO is the number of a hard |
| +register in which function arguments are sometimes passed. This does |
| +not include implicit arguments such as the static chain and |
| +the structure-value address. On many machines, no registers can be |
| +used for this purpose since all function arguments are pushed on the |
| +stack. |
| +*/ |
| +/* |
| + Use r8 - r12 for function arguments. |
| +*/ |
| +#define FUNCTION_ARG_REGNO_P(REGNO) \ |
| + ((REGNO) >= 3 && (REGNO) <= 7) |
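| + |
| +/* With the internal numbering this port appears to use (r12..r8 mapping |
| +   to 3..7, as the other definitions suggest), this accepts exactly the |
| +   five argument registers r12, r11, r10, r9 and r8. */ |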
| + |
| +/* Number of registers used for passing function arguments */ |
| +#define NUM_ARG_REGS 5 |
| + |
| +/* |
| +If defined, the order in which arguments are loaded into their |
| +respective argument registers is reversed so that the last |
| +argument is loaded first. This macro only affects arguments |
| +passed in registers. |
| +*/ |
| +/* #define LOAD_ARGS_REVERSED */ |
| + |
| +/** How Scalar Function Values Are Returned **/ |
| + |
| +/* AVR32 uses r12 as the return register. */ |
| +#define RET_REGISTER (15 - 12) |
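| + |
| +/* (15 - 12) == 3 is the internal number of r12, assuming the rN -> 15 - N |
| +   numbering used elsewhere in this port. DImode results instead come back |
| +   in the pair r11:r10, which is why FUNCTION_VALUE_REGNO_P below also |
| +   accepts r11. */ |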
| + |
| + |
| +/* |
| +A C expression to create an RTX representing the place where a library |
| +function returns a value of mode MODE. If the precise function |
| +being called is known, FUNC is a tree node |
| +(FUNCTION_DECL) for it; otherwise, func is a null |
| +pointer. This makes it possible to use a different value-returning |
| +convention for specific functions when all their calls are |
| +known. |
| + |
| +Note that "library function" in this context means a compiler |
| +support routine, used to perform arithmetic, whose name is known |
| +specially by the compiler and was not mentioned in the C code being |
| +compiled. |
| + |
| +The definition of LIBCALL_VALUE need not be concerned with aggregate |
| +data types, because none of the library functions returns such types. |
| +*/ |
| +#define LIBCALL_VALUE(MODE) avr32_libcall_value(MODE) |
| + |
| +/* |
| +A C expression that is nonzero if REGNO is the number of a hard |
| +register in which the values of called function may come back. |
| + |
| +A register whose use for returning values is limited to serving as the |
| +second of a pair (for a value of type double, say) need not be |
| +recognized by this macro. So for most machines, this definition |
| +suffices: |
| + #define FUNCTION_VALUE_REGNO_P(N) ((N) == 0) |
| + |
| +If the machine has register windows, so that the caller and the called |
| +function use different registers for the return value, this macro |
| +should recognize only the caller's register numbers. |
| +*/ |
| +/* |
| + When returning a value of mode DImode, r11:r10 is used, else r12 is used. |
| +*/ |
| +#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == RET_REGISTER \ |
| + || (REGNO) == INTERNAL_REGNUM(11)) |
| + |
| + |
| +/** How Large Values Are Returned **/ |
| + |
| + |
| +/* |
| +Define this macro to be 1 if all structure and union return values must be |
| +in memory. Since this results in slower code, this should be defined |
| +only if needed for compatibility with other compilers or with an ABI. |
| +If you define this macro to be 0, then the conventions used for structure |
| +and union return values are decided by the RETURN_IN_MEMORY macro. |
| + |
| +If not defined, this defaults to the value 1. |
| +*/ |
| +#define DEFAULT_PCC_STRUCT_RETURN 0 |
| + |
| + |
| + |
| + |
| +/** Generating Code for Profiling **/ |
| + |
| +/* |
| +A C statement or compound statement to output to FILE some |
| +assembler code to call the profiling subroutine mcount. |
| + |
| +The details of how mcount expects to be called are determined by |
| +your operating system environment, not by GCC. To figure them out, |
| +compile a small program for profiling using the system's installed C |
| +compiler and look at the assembler code that results. |
| + |
| +Older implementations of mcount expect the address of a counter |
| +variable to be loaded into some register. The name of this variable is |
| +'LP' followed by the number LABELNO, so you would generate |
| +the name using 'LP%d' in a fprintf. |
| +*/ |
| +/* ToDo: fixme */ |
| +#ifndef FUNCTION_PROFILER |
| +#define FUNCTION_PROFILER(FILE, LABELNO) \ |
| + fprintf((FILE), "/* profiler %d */", (LABELNO)) |
| +#endif |
| + |
| + |
| +/***************************************************************************** |
| + * Trampolines for Nested Functions * |
| + *****************************************************************************/ |
| + |
| +/* |
| +A C statement to output, on the stream FILE, assembler code for a |
| +block of data that contains the constant parts of a trampoline. This |
| +code should not include a label - the label is taken care of |
| +automatically. |
| + |
| +If you do not define this macro, it means no template is needed |
| +for the target. Do not define this macro on systems where the block move |
| +code to copy the trampoline into place would be larger than the code |
| +to generate it on the spot. |
| +*/ |
| +/* ToDo: correct? */ |
| +#define TRAMPOLINE_TEMPLATE(FILE) avr32_trampoline_template(FILE); |
| + |
| + |
| +/* |
| +A C expression for the size in bytes of the trampoline, as an integer. |
| +*/ |
| +/* ToDo: fixme */ |
| +#define TRAMPOLINE_SIZE 0x0C |
| + |
| +/* |
| +Alignment required for trampolines, in bits. |
| + |
| +If you don't define this macro, the value of BIGGEST_ALIGNMENT |
| +is used for aligning trampolines. |
| +*/ |
| +#define TRAMPOLINE_ALIGNMENT 16 |
| + |
| +/* |
| +A C statement to initialize the variable parts of a trampoline. |
| +ADDR is an RTX for the address of the trampoline; FNADDR is |
| +an RTX for the address of the nested function; STATIC_CHAIN is an |
| +RTX for the static chain value that should be passed to the function |
| +when it is called. |
| +*/ |
| +#define INITIALIZE_TRAMPOLINE(ADDR, FNADDR, STATIC_CHAIN) \ |
| + avr32_initialize_trampoline(ADDR, FNADDR, STATIC_CHAIN) |
| + |
| + |
| +/****************************************************************************** |
| + * Implicit Calls to Library Routines |
| + *****************************************************************************/ |
| + |
| +/* Tail calling. */ |
| + |
| +/* A C expression that evaluates to true if it is ok to perform a sibling |
| + call to DECL. */ |
| +#define FUNCTION_OK_FOR_SIBCALL(DECL) 0 |
| + |
| +#define OVERRIDE_OPTIONS avr32_override_options () |
| + |
| +#define OPTIMIZATION_OPTIONS(LEVEL, SIZE) avr32_optimization_options (LEVEL, SIZE) |
| + |
| +/****************************************************************************** |
| + * Addressing Modes |
| + *****************************************************************************/ |
| + |
| +/* |
| +A C expression that is nonzero if the machine supports pre-increment, |
| +pre-decrement, post-increment, or post-decrement addressing respectively. |
| +*/ |
| +/* |
| + AVR32 supports Rp++ and --Rp |
| +*/ |
| +#define HAVE_PRE_INCREMENT 0 |
| +#define HAVE_PRE_DECREMENT 1 |
| +#define HAVE_POST_INCREMENT 1 |
| +#define HAVE_POST_DECREMENT 0 |
| + |
| +/* |
| +A C expression that is nonzero if the machine supports pre- or |
| +post-address side-effect generation involving constants other than |
| +the size of the memory operand. |
| +*/ |
| +#define HAVE_PRE_MODIFY_DISP 0 |
| +#define HAVE_POST_MODIFY_DISP 0 |
| + |
| +/* |
| +A C expression that is nonzero if the machine supports pre- or |
| +post-address side-effect generation involving a register displacement. |
| +*/ |
| +#define HAVE_PRE_MODIFY_REG 0 |
| +#define HAVE_POST_MODIFY_REG 0 |
| + |
| +/* |
| +A C expression that is 1 if the RTX X is a constant which |
| +is a valid address. On most machines, this can be defined as |
| +CONSTANT_P (X), but a few machines are more restrictive |
| +in which constant addresses are supported. |
| + |
| +CONSTANT_P accepts integer-values expressions whose values are |
| +not explicitly known, such as symbol_ref, label_ref, and |
| +high expressions and const arithmetic expressions, in |
| +addition to const_int and const_double expressions. |
| +*/ |
| +#define CONSTANT_ADDRESS_P(X) CONSTANT_P(X) |
| + |
| +/* |
| +A number, the maximum number of registers that can appear in a valid |
| +memory address. Note that it is up to you to specify a value equal to |
| +the maximum number that GO_IF_LEGITIMATE_ADDRESS would ever |
| +accept. |
| +*/ |
| +#define MAX_REGS_PER_ADDRESS 2 |
| + |
| +/* |
| +A C compound statement with a conditional goto LABEL; |
| +executed if X (an RTX) is a legitimate memory address on the |
| +target machine for a memory operand of mode MODE. |
| + |
| +It usually pays to define several simpler macros to serve as |
| +subroutines for this one. Otherwise it may be too complicated to |
| +understand. |
| + |
| +This macro must exist in two variants: a strict variant and a |
| +non-strict one. The strict variant is used in the reload pass. It |
| +must be defined so that any pseudo-register that has not been |
| +allocated a hard register is considered a memory reference. In |
| +contexts where some kind of register is required, a pseudo-register |
| +with no hard register must be rejected. |
| + |
| +The non-strict variant is used in other passes. It must be defined to |
| +accept all pseudo-registers in every context where some kind of |
| +register is required. |
| + |
| +Compiler source files that want to use the strict variant of this |
| +macro define the macro REG_OK_STRICT. You should use an |
| +#ifdef REG_OK_STRICT conditional to define the strict variant |
| +in that case and the non-strict variant otherwise. |
| + |
| +Subroutines to check for acceptable registers for various purposes (one |
| +for base registers, one for index registers, and so on) are typically |
| +among the subroutines used to define GO_IF_LEGITIMATE_ADDRESS. |
| +Then only these subroutine macros need have two variants; the higher |
| +levels of macros may be the same whether strict or not. |
| + |
| +Normally, constant addresses which are the sum of a symbol_ref |
| +and an integer are stored inside a const RTX to mark them as |
| +constant. Therefore, there is no need to recognize such sums |
| +specifically as legitimate addresses. Normally you would simply |
| +recognize any const as legitimate. |
| + |
| +Usually PRINT_OPERAND_ADDRESS is not prepared to handle constant |
| +sums that are not marked with const. It assumes that a naked |
| +plus indicates indexing. If so, then you must reject such |
| +naked constant sums as illegitimate addresses, so that none of them will |
| +be given to PRINT_OPERAND_ADDRESS. |
| + |
| +On some machines, whether a symbolic address is legitimate depends on |
| +the section that the address refers to. On these machines, define the |
| +macro ENCODE_SECTION_INFO to store the information into the |
| +symbol_ref, and then check for it here. When you see a |
| +const, you will have to look inside it to find the |
| +symbol_ref in order to determine the section. |
| + |
| +The best way to modify the name string is by adding text to the |
| +beginning, with suitable punctuation to prevent any ambiguity. Allocate |
| +the new name in saveable_obstack. You will have to modify |
| +ASM_OUTPUT_LABELREF to remove and decode the added text and |
| +output the name accordingly, and define STRIP_NAME_ENCODING to |
| +access the original name string. |
| + |
| +You can check the information stored here into the symbol_ref in |
| +the definitions of the macros GO_IF_LEGITIMATE_ADDRESS and |
| +PRINT_OPERAND_ADDRESS. |
| +*/ |
| +#ifdef REG_OK_STRICT |
| +# define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \ |
| + do \ |
| + { \ |
| + if (avr32_legitimate_address(MODE, X, 1)) \ |
| + goto LABEL; \ |
| + } \ |
| + while (0) |
| +#else |
| +# define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \ |
| + do \ |
| + { \ |
| + if (avr32_legitimate_address(MODE, X, 0)) \ |
| + goto LABEL; \ |
| + } \ |
| + while (0) |
| +#endif |
| + |
| + |
| + |
| +/* |
| +A C compound statement that attempts to replace X with a valid |
| +memory address for an operand of mode MODE. WIN will be a |
| +C statement label elsewhere in the code; the macro definition may use |
| + |
| + GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN); |
| + |
| +to avoid further processing if the address has become legitimate. |
| + |
| +X will always be the result of a call to break_out_memory_refs, |
| +and OLDX will be the operand that was given to that function to produce |
| +X. |
| + |
| +The code generated by this macro should not alter the substructure of |
| +X. If it transforms X into a more legitimate form, it |
| +should assign X (which will always be a C variable) a new value. |
| + |
| +It is not necessary for this macro to come up with a legitimate |
| +address. The compiler has standard ways of doing so in all cases. In |
| +fact, it is safe for this macro to do nothing. But often a |
| +machine-dependent strategy can generate better code. |
| +*/ |
| +#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \ |
| + do \ |
| + { \ |
| + if (GET_CODE(X) == PLUS \ |
| + && GET_CODE(XEXP(X, 0)) == REG \ |
| + && GET_CODE(XEXP(X, 1)) == CONST_INT \ |
| + && !CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(X, 1)), \ |
| + 'K', "Ks16")) \ |
| + { \ |
| + rtx index = force_reg(SImode, XEXP(X, 1)); \ |
| + X = gen_rtx_PLUS( SImode, XEXP(X, 0), index); \ |
| + } \ |
| + GO_IF_LEGITIMATE_ADDRESS(MODE, X, WIN); \ |
| + } \ |
| + while(0) |
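| + |
| +/* Worked example (a sketch): for an address such as |
| +     (plus (reg) (const_int 100000)) |
| +   the constant does not satisfy the signed 16-bit "Ks16" displacement |
| +   check, so it is forced into a register and the address is rebuilt as |
| +   a register+register sum, which is then re-validated through |
| +   GO_IF_LEGITIMATE_ADDRESS. */ |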
| + |
| + |
| +/* |
| +A C statement or compound statement with a conditional |
| +goto LABEL; executed if memory address X (an RTX) can have |
| +different meanings depending on the machine mode of the memory |
| +reference it is used for or if the address is valid for some modes |
| +but not others. |
| + |
| +Autoincrement and autodecrement addresses typically have mode-dependent |
| +effects because the amount of the increment or decrement is the size |
| +of the operand being addressed. Some machines have other mode-dependent |
| +addresses. Many RISC machines have no mode-dependent addresses. |
| + |
| +You may assume that ADDR is a valid address for the machine. |
| +*/ |
| +#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \ |
| + do \ |
| + { \ |
| + if (GET_CODE (ADDR) == POST_INC \ |
| + || GET_CODE (ADDR) == PRE_DEC) \ |
| + goto LABEL; \ |
| + } \ |
| + while (0) |
| + |
| +/* |
| +A C expression that is nonzero if X is a legitimate constant for |
| +an immediate operand on the target machine. You can assume that |
| +X satisfies CONSTANT_P, so you need not check this. In fact, |
| +'1' is a suitable definition for this macro on machines where |
| +anything CONSTANT_P is valid. |
| +*/ |
| +#define LEGITIMATE_CONSTANT_P(X) avr32_legitimate_constant_p(X) |
| + |
| + |
| +/****************************************************************************** |
| + * Condition Code Status |
| + *****************************************************************************/ |
| + |
| +/* |
| +C code for a data type which is used for declaring the mdep |
| +component of cc_status. It defaults to int. |
| + |
| +This macro is not used on machines that do not use cc0. |
| +*/ |
| + |
| +typedef struct |
| +{ |
| + int flags; |
| + rtx value; |
| + int fpflags; |
| + rtx fpvalue; |
| + int cond_exec_cmp_clobbered; |
| +} avr32_status_reg; |
| + |
| + |
| +#define CC_STATUS_MDEP avr32_status_reg |
| + |
| +/* |
| +A C expression to initialize the mdep field to "empty". |
| +The default definition does nothing, since most machines don't use |
| +the field anyway. If you want to use the field, you should probably |
| +define this macro to initialize it. |
| + |
| +This macro is not used on machines that do not use cc0. |
| +*/ |
| + |
| +#define CC_STATUS_MDEP_INIT \ |
| + (cc_status.mdep.flags = CC_NONE , cc_status.mdep.cond_exec_cmp_clobbered = 0, cc_status.mdep.value = 0) |
| + |
| +#define FPCC_STATUS_INIT \ |
| + (cc_status.mdep.fpflags = CC_NONE , cc_status.mdep.fpvalue = 0) |
| + |
| +/* |
| +A C compound statement to set the components of cc_status |
| +appropriately for an insn INSN whose body is EXP. It is |
| +this macro's responsibility to recognize insns that set the condition |
| +code as a byproduct of other activity as well as those that explicitly |
| +set (cc0). |
| + |
| +This macro is not used on machines that do not use cc0. |
| + |
| +If there are insns that do not set the condition code but do alter |
| +other machine registers, this macro must check to see whether they |
| +invalidate the expressions that the condition code is recorded as |
| +reflecting. For example, on the 68000, insns that store in address |
| +registers do not set the condition code, which means that usually |
| +NOTICE_UPDATE_CC can leave cc_status unaltered for such |
| +insns. But suppose that the previous insn set the condition code |
| +based on location 'a4@(102)' and the current insn stores a new |
| +value in 'a4'. Although the condition code is not changed by |
| +this, it will no longer be true that it reflects the contents of |
| +'a4@(102)'. Therefore, NOTICE_UPDATE_CC must alter |
| +cc_status in this case to say that nothing is known about the |
| +condition code value. |
| + |
| +The definition of NOTICE_UPDATE_CC must be prepared to deal |
| +with the results of peephole optimization: insns whose patterns are |
| +parallel RTXs containing various reg, mem or |
| +constants which are just the operands. The RTL structure of these |
| +insns is not sufficient to indicate what the insns actually do. What |
| +NOTICE_UPDATE_CC should do when it sees one is just to run |
| +CC_STATUS_INIT. |
| + |
| +A possible definition of NOTICE_UPDATE_CC is to call a function |
| +that looks at an attribute (see Insn Attributes) named, for example, |
| +'cc'. This avoids having detailed information about patterns in |
| +two places, the 'md' file and in NOTICE_UPDATE_CC. |
| +*/ |
| + |
| +#define NOTICE_UPDATE_CC(EXP, INSN) avr32_notice_update_cc(EXP, INSN) |
| + |
| + |
| + |
| + |
| +/****************************************************************************** |
| + * Describing Relative Costs of Operations |
| + *****************************************************************************/ |
| + |
| + |
| + |
| +/* |
| +A C expression for the cost of moving data of mode MODE from a |
| +register in class FROM to one in class TO. The classes are |
| +expressed using the enumeration values such as GENERAL_REGS. A |
| +value of 2 is the default; other values are interpreted relative to |
| +that. |
| + |
| +It is not required that the cost always equal 2 when FROM is the |
| +same as TO; on some machines it is expensive to move between |
| +registers if they are not general registers. |
| + |
| +If reload sees an insn consisting of a single set between two |
| +hard registers, and if REGISTER_MOVE_COST applied to their |
| +classes returns a value of 2, reload does not check to ensure that the |
| +constraints of the insn are met. Setting a cost of other than 2 will |
| +allow reload to verify that the constraints are met. You should do this |
| +if the movm pattern's constraints do not allow such copying. |
| +*/ |
| +#define REGISTER_MOVE_COST(MODE, FROM, TO) \ |
| + ((GET_MODE_SIZE(MODE) <= 4) ? 2: \ |
| + (GET_MODE_SIZE(MODE) <= 8) ? 3: \ |
| + 4) |
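| + |
| +/* For example, a register-register copy of an SImode (4-byte) value keeps |
| +   the default cost of 2, a DImode (8-byte) value costs 3, and anything |
| +   wider costs 4; as noted above, the non-default costs also make reload |
| +   verify the insn constraints for such multi-word copies. */ |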
| + |
| +/* |
| +A C expression for the cost of moving data of mode MODE between a |
| +register of class CLASS and memory; IN is zero if the value |
| +is to be written to memory, nonzero if it is to be read in. This cost |
| +is relative to those in REGISTER_MOVE_COST. If moving between |
| +registers and memory is more expensive than between two registers, you |
| +should define this macro to express the relative cost. |
| + |
| +If you do not define this macro, GCC uses a default cost of 4 plus |
| +the cost of copying via a secondary reload register, if one is |
| +needed. If your machine requires a secondary reload register to copy |
| +between memory and a register of CLASS but the reload mechanism is |
| +more complex than copying via an intermediate, define this macro to |
| +reflect the actual cost of the move. |
| + |
| +GCC defines the function memory_move_secondary_cost if |
| +secondary reloads are needed. It computes the costs due to copying via |
| +a secondary register. If your machine copies from memory using a |
| +secondary register in the conventional way but the default base value of |
| +4 is not correct for your machine, define this macro to add some other |
| +value to the result of that function. The arguments to that function |
| +are the same as to this macro. |
| +*/ |
| +/* |
| + Memory moves are costly |
| +*/ |
| +#define MEMORY_MOVE_COST(MODE, CLASS, IN) \ |
| + (((IN) ? ((GET_MODE_SIZE(MODE) < 4) ? 4 : \ |
| + (GET_MODE_SIZE(MODE) > 8) ? 6 : \ |
| + 3) \ |
| + : ((GET_MODE_SIZE(MODE) > 8) ? 6 : 3))) |
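| + |
| +/* Concretely: a sub-word (QI/HI) load costs 4, a word or double-word load |
| +   costs 3, a store of up to 8 bytes costs 3, and any move wider than |
| +   8 bytes costs 6 in either direction. */ |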
| + |
| +/* |
| +A C expression for the cost of a branch instruction. A value of 1 is |
| +the default; other values are interpreted relative to that. |
| +*/ |
| + /* Try to use conditionals as much as possible */ |
| +#define BRANCH_COST (TARGET_BRANCH_PRED ? 3 : 4) |
| + |
| +/*A C expression for the maximum number of instructions to execute via conditional |
| + execution instructions instead of a branch. A value of BRANCH_COST+1 is the default |
| + if the machine does not use cc0, and 1 if it does use cc0.*/ |
| +#define MAX_CONDITIONAL_EXECUTE 4 |
| + |
| +/* |
| +Define this macro as a C expression which is nonzero if accessing less |
| +than a word of memory (i.e., a char or a short) is no |
| +faster than accessing a word of memory, i.e., if such an access |
| +requires more than one instruction or if there is no difference in cost |
| +between byte and (aligned) word loads. |
| + |
| +When this macro is not defined, the compiler will access a field by |
| +finding the smallest containing object; when it is defined, a fullword |
| +load will be used if alignment permits. Unless byte accesses are |
| +faster than word accesses, using word accesses is preferable since it |
| +may eliminate subsequent memory access if subsequent accesses occur to |
| +other fields in the same word of the structure, but to different bytes. |
| +*/ |
| +#define SLOW_BYTE_ACCESS 1 |
| + |
| + |
| +/* |
| +Define this macro if it is as good or better to call a constant |
| +function address than to call an address kept in a register. |
| +*/ |
| +#define NO_FUNCTION_CSE |
| + |
| + |
| +/****************************************************************************** |
| + * Adjusting the Instruction Scheduler |
| + *****************************************************************************/ |
| + |
| +/***************************************************************************** |
| + * Dividing the Output into Sections (Texts, Data, ...) * |
| + *****************************************************************************/ |
| + |
| +/* |
| +A C expression whose value is a string, including spacing, containing the |
| +assembler operation that should precede instructions and read-only data. |
| +Normally "\t.text" is right. |
| +*/ |
| +#define TEXT_SECTION_ASM_OP "\t.text" |
| +/* |
| +A C statement that switches to the default section containing instructions. |
| +Normally this is not needed, as simply defining TEXT_SECTION_ASM_OP |
| +is enough. The MIPS port uses this to sort all functions after all data |
| +declarations. |
| +*/ |
| +/* #define TEXT_SECTION */ |
| + |
| +/* |
| +A C expression whose value is a string, including spacing, containing the |
| +assembler operation to identify the following data as writable initialized |
| +data. Normally "\t.data" is right. |
| +*/ |
| +#define DATA_SECTION_ASM_OP "\t.data" |
| + |
| +/* |
| +If defined, a C expression whose value is a string, including spacing, |
| +containing the assembler operation to identify the following data as |
| +shared data. If not defined, DATA_SECTION_ASM_OP will be used. |
| +*/ |
| + |
| +/* |
| +A C expression whose value is a string, including spacing, containing |
| +the assembler operation to identify the following data as read-only |
| +initialized data. |
| +*/ |
| +#undef READONLY_DATA_SECTION_ASM_OP |
| +#define READONLY_DATA_SECTION_ASM_OP \ |
| + ((TARGET_USE_RODATA_SECTION) ? \ |
| + "\t.section\t.rodata" : \ |
| + TEXT_SECTION_ASM_OP ) |
| + |
| + |
| +/* |
| +If defined, a C expression whose value is a string, including spacing, |
| +containing the assembler operation to identify the following data as |
| +uninitialized global data. If not defined, and neither |
| +ASM_OUTPUT_BSS nor ASM_OUTPUT_ALIGNED_BSS are defined, |
| +uninitialized global data will be output in the data section if |
| +-fno-common is passed, otherwise ASM_OUTPUT_COMMON will be |
| +used. |
| +*/ |
| +#define BSS_SECTION_ASM_OP "\t.section\t.bss" |
| + |
| +/* |
| +If defined, a C expression whose value is a string, including spacing, |
| +containing the assembler operation to identify the following data as |
| +uninitialized global shared data. If not defined, and |
| +BSS_SECTION_ASM_OP is, the latter will be used. |
| +*/ |
| +/*#define SHARED_BSS_SECTION_ASM_OP "\trseg\tshared_bbs_section:data:noroot(0)\n"*/ |
| +/* |
| +If defined, a C expression whose value is a string, including spacing, |
| +containing the assembler operation to identify the following data as |
| +initialization code. If not defined, GCC will assume such a section does |
| +not exist. |
| +*/ |
| +#undef INIT_SECTION_ASM_OP |
| +#define INIT_SECTION_ASM_OP "\t.section\t.init" |
| + |
| +/* |
| +If defined, a C expression whose value is a string, including spacing, |
| +containing the assembler operation to identify the following data as |
| +finalization code. If not defined, GCC will assume such a section does |
| +not exist. |
| +*/ |
| +#undef FINI_SECTION_ASM_OP |
| +#define FINI_SECTION_ASM_OP "\t.section\t.fini" |
| + |
| +/* |
| +If defined, an ASM statement that switches to a different section |
| +via SECTION_OP, calls FUNCTION, and switches back to |
| +the text section. This is used in crtstuff.c if |
| +INIT_SECTION_ASM_OP or FINI_SECTION_ASM_OP is defined, for calls |
| +to initialization and finalization functions from the init and fini |
| +sections. By default, this macro uses a simple function call. Some |
| +ports need hand-crafted assembly code to avoid dependencies on |
| +registers initialized in the function prologue or to ensure that |
| +constant pools don't end up too far away in the text section. |
| +*/ |
| +#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \ |
| + asm ( SECTION_OP "\n" \ |
| + "mcall r6[" USER_LABEL_PREFIX #FUNC "@got]\n" \ |
| + TEXT_SECTION_ASM_OP); |
| + |
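| +/* Roughly, for a hypothetical init function FUNC (and assuming an empty |
| +   USER_LABEL_PREFIX) the expansion with INIT_SECTION_ASM_OP is |
| + |
| +     asm ("\t.section\t.init\n" |
| +          "mcall r6[FUNC@got]\n" |
| +          "\t.text"); |
| + |
| +   i.e. the function is called indirectly through the GOT pointer in r6 |
| +   while the .init (or .fini) section is selected, and the text section is |
| +   restored afterwards. */ |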
| + |
| +/* |
| +Define this macro to be an expression with a nonzero value if jump |
| +tables (for tablejump insns) should be output in the text |
| +section, along with the assembler instructions. Otherwise, the |
| +readonly data section is used. |
| + |
| +This macro is irrelevant if there is no separate readonly data section. |
| +*/ |
| +/* Put jump tables in text section if we have caches. Otherwise assume that |
| + loading data from code memory is slow. */ |
| +#define JUMP_TABLES_IN_TEXT_SECTION \ |
| + (TARGET_CACHES ? 1 : 0) |
| + |
| + |
| +/****************************************************************************** |
| + * Position Independent Code (PIC) |
| + *****************************************************************************/ |
| + |
| +#ifndef AVR32_ALWAYS_PIC |
| +#define AVR32_ALWAYS_PIC 0 |
| +#endif |
| + |
| +/* GOT is set to r6 */ |
| +#define PIC_OFFSET_TABLE_REGNUM INTERNAL_REGNUM(6) |
| + |
| +/* |
| +A C expression that is nonzero if X is a legitimate immediate |
| +operand on the target machine when generating position independent code. |
| +You can assume that X satisfies CONSTANT_P, so you need not |
| +check this. You can also assume flag_pic is true, so you need not |
| +check it either. You need not define this macro if all constants |
| +(including SYMBOL_REF) can be immediate operands when generating |
| +position independent code. |
| +*/ |
| +/* We can't directly access anything that contains a symbol, |
| + nor can we indirect via the constant pool. */ |
| +#define LEGITIMATE_PIC_OPERAND_P(X) avr32_legitimate_pic_operand_p(X) |
| + |
| + |
| +/* We need to know when we are making a constant pool; this determines |
| + whether data needs to be in the GOT or can be referenced via a GOT |
| + offset. */ |
| +extern int making_const_table; |
| + |
| +/****************************************************************************** |
| + * Defining the Output Assembler Language |
| + *****************************************************************************/ |
| + |
| + |
| +/* |
| +A C string constant describing how to begin a comment in the target |
| +assembler language. The compiler assumes that the comment will end at |
| +the end of the line. |
| +*/ |
| +#define ASM_COMMENT_START "# " |
| + |
| +/* |
| +A C string constant for text to be output before each asm |
| +statement or group of consecutive ones. Normally this is |
| +"#APP", which is a comment that has no effect on most |
| +assemblers but tells the GNU assembler that it must check the lines |
| +that follow for all valid assembler constructs. |
| +*/ |
| +#undef ASM_APP_ON |
| +#define ASM_APP_ON "#APP\n" |
| + |
| +/* |
| +A C string constant for text to be output after each asm |
| +statement or group of consecutive ones. Normally this is |
| +"#NO_APP", which tells the GNU assembler to resume making the |
| +time-saving assumptions that are valid for ordinary compiler output. |
| +*/ |
| +#undef ASM_APP_OFF |
| +#define ASM_APP_OFF "#NO_APP\n" |
| + |
| + |
| + |
| +#define FILE_ASM_OP "\t.file\n" |
| +#define IDENT_ASM_OP "\t.ident\t" |
| +#define SET_ASM_OP "\t.set\t" |
| + |
| + |
| +/* |
| + * Output assembly directives to switch to section name. The section |
| + * should have attributes as specified by flags, which is a bit mask |
| + * of the SECTION_* flags defined in 'output.h'. If align is nonzero, |
| + * it contains an alignment in bytes to be used for the section, |
| + * otherwise some target default should be used. Only targets that |
| + * must specify an alignment within the section directive need pay |
| + * attention to align -- we will still use ASM_OUTPUT_ALIGN. |
| + * |
| + * NOTE: This one must not be moved to avr32.c |
| + */ |
| +#undef TARGET_ASM_NAMED_SECTION |
| +#define TARGET_ASM_NAMED_SECTION default_elf_asm_named_section |
| + |
| + |
| +/* |
| +You may define this macro as a C expression. You should define the |
| +expression to have a nonzero value if GCC should output the constant |
| +pool for a function before the code for the function, or a zero value if |
| +GCC should output the constant pool after the function. If you do |
| +not define this macro, the usual case, GCC will output the constant |
| +pool before the function. |
| +*/ |
| +#define CONSTANT_POOL_BEFORE_FUNCTION 0 |
| + |
| + |
| +/* |
| +Define this macro as a C expression which is nonzero if the constant |
| +EXP, of type tree, should be output after the code for a |
| +function. The compiler will normally output all constants before the |
| +function; you need not define this macro if this is OK. |
| +*/ |
| +#define CONSTANT_AFTER_FUNCTION_P(EXP) 1 |
| + |
| + |
| +/* |
| +Define this macro as a C expression which is nonzero if C is |
| +used as a logical line separator by the assembler. |
| + |
| +If you do not define this macro, the default is that only |
| +the character ';' is treated as a logical line separator. |
| +*/ |
| +#define IS_ASM_LOGICAL_LINE_SEPARATOR(C) ((C) == '\n') |
| + |
| + |
| +/** Output of Uninitialized Variables **/ |
| + |
| +/* |
| +A C statement (sans semicolon) to output to the stdio stream |
| +STREAM the assembler definition of a common-label named |
| +NAME whose size is SIZE bytes. The variable ROUNDED |
| +is the size rounded up to whatever alignment the caller wants. |
| + |
| +Use the expression assemble_name(STREAM, NAME) to |
| +output the name itself; before and after that, output the additional |
| +assembler syntax for defining the name, and a newline. |
| + |
| +This macro controls how the assembler definitions of uninitialized |
| +common global variables are output. |
| +*/ |
| +/* |
| +#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \ |
| + avr32_asm_output_common(STREAM, NAME, SIZE, ROUNDED) |
| +*/ |
| + |
| +#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \ |
| + do \ |
| + { \ |
| + fputs ("\t.comm ", (FILE)); \ |
| + assemble_name ((FILE), (NAME)); \ |
| + fprintf ((FILE), ",%d\n", (SIZE)); \ |
| + } \ |
| + while (0) |
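| + |
| +/* For instance, a 64-byte common symbol with the illustrative name "buf" |
| +   is emitted as |
| + |
| +     .comm buf,64 |
| + |
| +   note that ROUNDED is ignored and the raw SIZE is used. */ |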
| + |
| +/* |
| + * Like ASM_OUTPUT_BSS except takes the required alignment as a |
| + * separate, explicit argument. If you define this macro, it is used |
| + * in place of ASM_OUTPUT_BSS, and gives you more flexibility in |
| + * handling the required alignment of the variable. The alignment is |
| + * specified as the number of bits. |
| + * |
| + * Try to use function asm_output_aligned_bss defined in file varasm.c |
| + * when defining this macro. |
| + */ |
| +#define ASM_OUTPUT_ALIGNED_BSS(STREAM, DECL, NAME, SIZE, ALIGNMENT) \ |
| + asm_output_aligned_bss (STREAM, DECL, NAME, SIZE, ALIGNMENT) |
| + |
| +/* |
| +A C statement (sans semicolon) to output to the stdio stream |
| +STREAM the assembler definition of a local-common-label named |
| +NAME whose size is SIZE bytes. The variable ROUNDED |
| +is the size rounded up to whatever alignment the caller wants. |
| + |
| +Use the expression assemble_name(STREAM, NAME) to |
| +output the name itself; before and after that, output the additional |
| +assembler syntax for defining the name, and a newline. |
| + |
| +This macro controls how the assembler definitions of uninitialized |
| +static variables are output. |
| +*/ |
| +#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \ |
| + do \ |
| + { \ |
| + fputs ("\t.lcomm ", (FILE)); \ |
| + assemble_name ((FILE), (NAME)); \ |
| + fprintf ((FILE), ",%d, %d\n", (SIZE), 2); \ |
| + } \ |
| + while (0) |
| + |
| + |
| +/* |
| +A C statement (sans semicolon) to output to the stdio stream |
| +STREAM the assembler definition of a label named NAME. |
| +Use the expression assemble_name(STREAM, NAME) to |
| +output the name itself; before and after that, output the additional |
| +assembler syntax for defining the name, and a newline. |
| +*/ |
| +#define ASM_OUTPUT_LABEL(STREAM, NAME) avr32_asm_output_label(STREAM, NAME) |
| + |
| +/* A C string containing the appropriate assembler directive to |
| + * specify the size of a symbol, without any arguments. On systems |
| + * that use ELF, the default (in 'config/elfos.h') is '"\t.size\t"'; |
| + * on other systems, the default is not to define this macro. |
| + * |
| + * Define this macro only if it is correct to use the default |
| + * definitions of ASM_OUTPUT_SIZE_DIRECTIVE and |
| + * ASM_OUTPUT_MEASURED_SIZE for your system. If you need your own |
| + * custom definitions of those macros, or if you do not need explicit |
| + * symbol sizes at all, do not define this macro. |
| + */ |
| +#define SIZE_ASM_OP "\t.size\t" |
| + |
| + |
| +/* |
| +A C statement (sans semicolon) to output to the stdio stream |
| +STREAM some commands that will make the label NAME global; |
| +that is, available for reference from other files. Use the expression |
| +assemble_name(STREAM, NAME) to output the name |
| +itself; before and after that, output the additional assembler syntax |
| +for making that name global, and a newline. |
| +*/ |
| +#define GLOBAL_ASM_OP "\t.globl\t" |
| + |
| + |
| + |
| +/* |
| +A C expression which evaluates to true if the target supports weak symbols. |
| + |
| +If you don't define this macro, defaults.h provides a default |
| +definition. If either ASM_WEAKEN_LABEL or ASM_WEAKEN_DECL |
| +is defined, the default definition is '1'; otherwise, it is |
| +'0'. Define this macro if you want to control weak symbol support |
| +with a compiler flag such as -melf. |
| +*/ |
| +#define SUPPORTS_WEAK 1 |
| + |
| +/* |
| +A C statement (sans semicolon) to output to the stdio stream |
| +STREAM a reference in assembler syntax to a label named |
| +NAME. This should add '_' to the front of the name, if that |
| +is customary on your operating system, as it is in most Berkeley Unix |
| +systems. This macro is used in assemble_name. |
| +*/ |
| +#define ASM_OUTPUT_LABELREF(STREAM, NAME) \ |
| + avr32_asm_output_labelref(STREAM, NAME) |
| + |
| + |
| + |
| +/* |
| +A C expression to assign to OUTVAR (which is a variable of type |
| +char *) a newly allocated string made from the string |
| +NAME and the number NUMBER, with some suitable punctuation |
| +added. Use alloca to get space for the string. |
| + |
| +The string will be used as an argument to ASM_OUTPUT_LABELREF to |
| +produce an assembler label for an internal static variable whose name is |
| +NAME. Therefore, the string must be such as to result in valid |
| +assembler code. The argument NUMBER is different each time this |
| +macro is executed; it prevents conflicts between similarly-named |
| +internal static variables in different scopes. |
| + |
| +Ideally this string should not be a valid C identifier, to prevent any |
| +conflict with the user's own symbols. Most assemblers allow periods |
| +or percent signs in assembler symbols; putting at least one of these |
| +between the name and the number will suffice. |
| +*/ |
| +#define ASM_FORMAT_PRIVATE_NAME(OUTVAR, NAME, NUMBER) \ |
| + do \ |
| + { \ |
| + (OUTVAR) = (char *) alloca (strlen ((NAME)) + 10); \ |
| + sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)); \ |
| + } \ |
| + while (0) |
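| + |
| +/* For example, NAME "counter" (an illustrative name) with NUMBER 2 yields |
| +   the assembler-level name "counter.2"; the '.' keeps it from colliding |
| +   with any valid C identifier, as recommended above. */ |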
| + |
| + |
| +/** Macros Controlling Initialization Routines **/ |
| + |
| + |
| +/* |
| +If defined, main will not call __main as described above. |
| +This macro should be defined for systems that control start-up code |
| +on a symbol-by-symbol basis, such as OSF/1, and should not |
| +be defined explicitly for systems that support INIT_SECTION_ASM_OP. |
| +*/ |
| +/* |
| + __main is not defined when debugging. |
| +*/ |
| +#define HAS_INIT_SECTION |
| + |
| + |
| +/** Output of Assembler Instructions **/ |
| + |
| +/* |
| +A C initializer containing the assembler's names for the machine |
| +registers, each one as a C string constant. This is what translates |
| +register numbers in the compiler into assembler language. |
| +*/ |
| + |
| +#define REGISTER_NAMES \ |
| +{ \ |
| + "pc", "lr", \ |
| + "sp", "r12", \ |
| + "r11", "r10", \ |
| + "r9", "r8", \ |
| + "r7", "r6", \ |
| + "r5", "r4", \ |
| + "r3", "r2", \ |
| + "r1", "r0", \ |
| + "f15","f14", \ |
| + "f13","f12", \ |
| + "f11","f10", \ |
| + "f9", "f8", \ |
| + "f7", "f6", \ |
| + "f5", "f4", \ |
| + "f3", "f2", \ |
| + "f1", "f0" \ |
| +} |
| + |
| +/* |
| +A C compound statement to output to stdio stream STREAM the |
| +assembler syntax for an instruction operand X. X is an |
| +RTL expression. |
| + |
| +CODE is a value that can be used to specify one of several ways |
| +of printing the operand. It is used when identical operands must be |
| +printed differently depending on the context. CODE comes from |
| +the '%' specification that was used to request printing of the |
| +operand. If the specification was just '%digit' then |
| +CODE is 0; if the specification was '%ltr digit' |
| +then CODE is the ASCII code for ltr. |
| + |
| +If X is a register, this macro should print the register's name. |
| +The names can be found in an array reg_names whose type is |
| +char *[]. reg_names is initialized from REGISTER_NAMES. |
| + |
| +When the machine description has a specification '%punct' |
| +(a '%' followed by a punctuation character), this macro is called |
| +with a null pointer for X and the punctuation character for |
| +CODE. |
| +*/ |
| +#define PRINT_OPERAND(STREAM, X, CODE) avr32_print_operand(STREAM, X, CODE) |
| + |
| +/* A C statement to be executed just prior to the output of |
| + assembler code for INSN, to modify the extracted operands so |
| + they will be output differently. |
| + |
| + Here the argument OPVEC is the vector containing the operands |
| + extracted from INSN, and NOPERANDS is the number of elements of |
| + the vector which contain meaningful data for this insn. |
| + The contents of this vector are what will be used to convert the insn |
| + template into assembler code, so you can change the assembler output |
| + by changing the contents of the vector. */ |
| +#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \ |
| + avr32_final_prescan_insn ((INSN), (OPVEC), (NOPERANDS)) |
| + |
| +/* |
| +A C expression which evaluates to true if CODE is a valid |
| +punctuation character for use in the PRINT_OPERAND macro. If |
| +PRINT_OPERAND_PUNCT_VALID_P is not defined, it means that no |
| +punctuation characters (except for the standard one, '%') are used |
| +in this way. |
| +*/ |
| +#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \ |
| + (((CODE) == '?') \ |
| + || ((CODE) == '!')) |
| + |
| +/* |
| +A C compound statement to output to stdio stream STREAM the |
| +assembler syntax for an instruction operand that is a memory reference |
| +whose address is X. X is an RTL expression. |
| + |
| +On some machines, the syntax for a symbolic address depends on the |
| +section that the address refers to. On these machines, define the macro |
| +ENCODE_SECTION_INFO to store the information into the |
| +symbol_ref, and then check for it here. (see Assembler Format.) |
| +*/ |
| +#define PRINT_OPERAND_ADDRESS(STREAM, X) avr32_print_operand_address(STREAM, X) |
| + |
| + |
| +/** Output of Dispatch Tables **/ |
| + |
| +/* |
| + * A C statement to output to the stdio stream stream an assembler |
| + * pseudo-instruction to generate a difference between two |
| + * labels. value and rel are the numbers of two internal labels. The |
| + * definitions of these labels are output using |
| + * (*targetm.asm_out.internal_label), and they must be printed in the |
| + * same way here. For example, |
| + * |
| + * fprintf (stream, "\t.word L%d-L%d\n", |
| + * value, rel) |
| + * |
| + * You must provide this macro on machines where the addresses in a |
| + * dispatch table are relative to the table's own address. If defined, |
| + * GCC will also use this macro on all machines when producing |
| + * PIC. body is the body of the ADDR_DIFF_VEC; it is provided so that |
| + * the mode and flags can be read. |
| + */ |
| +#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \ |
| + fprintf(STREAM, "\tbral\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE) |
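| + |
| +/* Note that each dispatch-table "element" emitted here is really a bral |
| +   instruction rather than an address difference, so the table is a sequence |
| +   of branches; e.g. with VALUE == 5 (and assuming LOCAL_LABEL_PREFIX is |
| +   ".") the emitted line is "bral .L5". REL and BODY are unused. */ |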
| + |
| +/* |
| +This macro should be provided on machines where the addresses |
| +in a dispatch table are absolute. |
| + |
| +The definition should be a C statement to output to the stdio stream |
| +STREAM an assembler pseudo-instruction to generate a reference to |
| +a label. VALUE is the number of an internal label whose |
| +definition is output using ASM_OUTPUT_INTERNAL_LABEL. |
| +For example, |
| + |
| +fprintf(STREAM, "\t.word L%d\n", VALUE) |
| +*/ |
| + |
| +#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \ |
| + fprintf(STREAM, "\t.long %sL%d\n", LOCAL_LABEL_PREFIX, VALUE) |
| + |
| +/** Assembler Commands for Exception Regions */ |
| + |
| +/* ToDo: All of this subsection */ |
| + |
| +/** Assembler Commands for Alignment */ |
| + |
| + |
| +/* |
| +A C statement to output to the stdio stream STREAM an assembler |
| +command to advance the location counter to a multiple of 2 to the |
| +POWER bytes. POWER will be a C expression of type int. |
| +*/ |
| +#define ASM_OUTPUT_ALIGN(STREAM, POWER) \ |
| + do \ |
| + { \ |
| + if ((POWER) != 0) \ |
| + fprintf(STREAM, "\t.align\t%d\n", POWER); \ |
| + } \ |
| + while (0) |
| + |
| +/* |
| +Like ASM_OUTPUT_ALIGN, except that the 'nop' instruction is used for |
| +padding, if necessary. |
| +*/ |
| +#define ASM_OUTPUT_ALIGN_WITH_NOP(STREAM, POWER) \ |
| + fprintf(STREAM, "\t.balignw\t%d, 0xd703\n", (1 << POWER)) |
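| + |
| +/* For example, POWER == 2 requests 4-byte alignment and emits |
| +   "\t.balignw\t4, 0xd703", where 0xd703 is taken to be the encoding of the |
| +   nop used for padding. */ |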
| + |
| + |
| + |
| +/****************************************************************************** |
| + * Controlling Debugging Information Format |
| + *****************************************************************************/ |
| + |
| +/* How to renumber registers for dbx and gdb. */ |
| +#define DBX_REGISTER_NUMBER(REGNO) ASM_REGNUM (REGNO) |
| + |
| +/* The DWARF 2 CFA column which tracks the return address. */ |
| +#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM(LR_REGNUM) |
| + |
| +/* |
| +Define this macro if GCC should produce dwarf version 2 format |
| +debugging output in response to the -g option. |
| + |
| +To support optional call frame debugging information, you must also |
| +define INCOMING_RETURN_ADDR_RTX and either set |
| +RTX_FRAME_RELATED_P on the prologue insns if you use RTL for the |
| +prologue, or call dwarf2out_def_cfa and dwarf2out_reg_save |
| +as appropriate from TARGET_ASM_FUNCTION_PROLOGUE if you don't. |
| +*/ |
| +#define DWARF2_DEBUGGING_INFO 1 |
| + |
| + |
| +#define DWARF2_ASM_LINE_DEBUG_INFO 1 |
| +#define DWARF2_FRAME_INFO 1 |
| + |
| + |
| +/****************************************************************************** |
| + * Miscellaneous Parameters |
| + *****************************************************************************/ |
| + |
| +/* ToDo: a lot */ |
| + |
| +/* |
| +An alias for a machine mode name. This is the machine mode that |
| +elements of a jump-table should have. |
| +*/ |
| +#define CASE_VECTOR_MODE SImode |
| + |
| +/* |
| +Define this macro to be a C expression to indicate when jump-tables |
| +should contain relative addresses. If jump-tables never contain |
| +relative addresses, then you need not define this macro. |
| +*/ |
| +#define CASE_VECTOR_PC_RELATIVE 0 |
| + |
| +/* Increase the threshold for using table jumps on the UC arch. */ |
| +#define CASE_VALUES_THRESHOLD (TARGET_BRANCH_PRED ? 4 : 7) |
| + |
| +/* |
| +The maximum number of bytes that a single instruction can move quickly |
| +between memory and registers or between two memory locations. |
| +*/ |
| +#define MOVE_MAX (2*UNITS_PER_WORD) |
| + |
| + |
| +/* A C expression that is nonzero if on this machine the number of bits actually used |
| +   for the count of a shift operation is equal to the number of bits needed to represent |
| +   the size of the object being shifted. When this macro is nonzero, the compiler will |
| +   assume that it is safe to omit a sign-extend, zero-extend, and certain bitwise 'and' |
| +   instructions that truncate the count of a shift operation. On machines that have |
| +   instructions that act on bit-fields at variable positions, which may include 'bit test' |
| +   instructions, a nonzero SHIFT_COUNT_TRUNCATED also enables deletion of truncations |
| +   of the values that serve as arguments to bit-field instructions. |
| +   If both types of instructions truncate the count (for shifts) and position (for bit-field |
| +   operations), or if no variable-position bit-field instructions exist, you should define |
| +   this macro. |
| +   However, on some machines, such as the 80386 and the 680x0, truncation only applies |
| +   to shift operations and not the (real or pretended) bit-field operations. Define |
| +   SHIFT_COUNT_TRUNCATED to be zero on such machines. Instead, add patterns to the 'md' |
| +   file that include the implied truncation of the shift instructions. |
| +   You need not define this macro if it would always have the value of zero. */ |
| +#define SHIFT_COUNT_TRUNCATED 1 |
| + |
| +/* |
| +A C expression which is nonzero if on this machine it is safe to |
| +convert an integer of INPREC bits to one of OUTPREC |
| +bits (where OUTPREC is smaller than INPREC) by merely |
| +operating on it as if it had only OUTPREC bits. |
| + |
| +On many machines, this expression can be 1. |
| + |
| +When TRULY_NOOP_TRUNCATION returns 1 for a pair of sizes for |
| +modes for which MODES_TIEABLE_P is 0, suboptimal code can result. |
| +If this is the case, making TRULY_NOOP_TRUNCATION return 0 in |
| +such cases may improve things. |
| +*/ |
| +#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1 |
| + |
| +/* |
| +An alias for the machine mode for pointers. On most machines, define |
| +this to be the integer mode corresponding to the width of a hardware |
| +pointer; SImode on 32-bit machines or DImode on 64-bit machines. |
| +On some machines you must define this to be one of the partial integer |
| +modes, such as PSImode. |
| + |
| +The width of Pmode must be at least as large as the value of |
| +POINTER_SIZE. If it is not equal, you must define the macro |
| +POINTERS_EXTEND_UNSIGNED to specify how pointers are extended |
| +to Pmode. |
| +*/ |
| +#define Pmode SImode |
| + |
| +/* |
| +An alias for the machine mode used for memory references to functions |
| +being called, in call RTL expressions. On most machines this |
| +should be QImode. |
| +*/ |
| +#define FUNCTION_MODE SImode |
| + |
| + |
| +#define REG_S_P(x) \ |
| + (REG_P (x) || (GET_CODE (x) == SUBREG && REG_P (XEXP (x, 0)))) |
| + |
| + |
| +/* If defined, modifies the length assigned to instruction INSN as a |
| + function of the context in which it is used. LENGTH is an lvalue |
| + that contains the initially computed length of the insn and should |
| + be updated with the correct length of the insn. */ |
| +#define ADJUST_INSN_LENGTH(INSN, LENGTH) \ |
| + ((LENGTH) = avr32_adjust_insn_length ((INSN), (LENGTH))) |
| + |
| + |
| +#define CLZ_DEFINED_VALUE_AT_ZERO(mode, value) \ |
| + (value = 32, (mode == SImode)) |
| + |
| +#define CTZ_DEFINED_VALUE_AT_ZERO(mode, value) \ |
| + (value = 32, (mode == SImode)) |
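| +/* i.e. clz/ctz applied to zero is defined to yield 32, and only SImode |
| +   operations carry that guarantee. */ |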
| + |
| +#define UNITS_PER_SIMD_WORD UNITS_PER_WORD |
| + |
| +#define STORE_FLAG_VALUE 1 |
| + |
| + |
| +/* IF-conversion macros. */ |
| +#define IFCVT_MODIFY_INSN( CE_INFO, PATTERN, INSN ) \ |
| + { \ |
| + (PATTERN) = avr32_ifcvt_modify_insn (CE_INFO, PATTERN, INSN, &num_true_changes); \ |
| + } |
| + |
| +#define IFCVT_EXTRA_FIELDS \ |
| + int num_cond_clobber_insns; \ |
| + int num_extra_move_insns; \ |
| + rtx extra_move_insns[MAX_CONDITIONAL_EXECUTE]; \ |
| + rtx moved_insns[MAX_CONDITIONAL_EXECUTE]; |
| + |
| +#define IFCVT_INIT_EXTRA_FIELDS( CE_INFO ) \ |
| + { \ |
| + (CE_INFO)->num_cond_clobber_insns = 0; \ |
| + (CE_INFO)->num_extra_move_insns = 0; \ |
| + } |
| + |
| + |
| +#define IFCVT_MODIFY_CANCEL( CE_INFO ) avr32_ifcvt_modify_cancel (CE_INFO, &num_true_changes) |
| + |
| +#define IFCVT_ALLOW_MODIFY_TEST_IN_INSN 1 |
| +#define IFCVT_COND_EXEC_BEFORE_RELOAD (TARGET_COND_EXEC_BEFORE_RELOAD) |
| + |
| +enum avr32_builtins |
| +{ |
| + AVR32_BUILTIN_MTSR, |
| + AVR32_BUILTIN_MFSR, |
| + AVR32_BUILTIN_MTDR, |
| + AVR32_BUILTIN_MFDR, |
| + AVR32_BUILTIN_CACHE, |
| + AVR32_BUILTIN_SYNC, |
| + AVR32_BUILTIN_SSRF, |
| + AVR32_BUILTIN_CSRF, |
| + AVR32_BUILTIN_TLBR, |
| + AVR32_BUILTIN_TLBS, |
| + AVR32_BUILTIN_TLBW, |
| + AVR32_BUILTIN_BREAKPOINT, |
| + AVR32_BUILTIN_XCHG, |
| + AVR32_BUILTIN_LDXI, |
| + AVR32_BUILTIN_BSWAP16, |
| + AVR32_BUILTIN_BSWAP32, |
| + AVR32_BUILTIN_COP, |
| + AVR32_BUILTIN_MVCR_W, |
| + AVR32_BUILTIN_MVRC_W, |
| + AVR32_BUILTIN_MVCR_D, |
| + AVR32_BUILTIN_MVRC_D, |
| + AVR32_BUILTIN_MULSATHH_H, |
| + AVR32_BUILTIN_MULSATHH_W, |
| + AVR32_BUILTIN_MULSATRNDHH_H, |
| + AVR32_BUILTIN_MULSATRNDWH_W, |
| + AVR32_BUILTIN_MULSATWH_W, |
| + AVR32_BUILTIN_MACSATHH_W, |
| + AVR32_BUILTIN_SATADD_H, |
| + AVR32_BUILTIN_SATSUB_H, |
| + AVR32_BUILTIN_SATADD_W, |
| + AVR32_BUILTIN_SATSUB_W, |
| + AVR32_BUILTIN_MULWH_D, |
| + AVR32_BUILTIN_MULNWH_D, |
| + AVR32_BUILTIN_MACWH_D, |
| + AVR32_BUILTIN_MACHH_D, |
| + AVR32_BUILTIN_MUSFR, |
| + AVR32_BUILTIN_MUSTR, |
| + AVR32_BUILTIN_SATS, |
| + AVR32_BUILTIN_SATU, |
| + AVR32_BUILTIN_SATRNDS, |
| + AVR32_BUILTIN_SATRNDU, |
| + AVR32_BUILTIN_MEMS, |
| + AVR32_BUILTIN_MEMC, |
| + AVR32_BUILTIN_MEMT |
| +}; |
| + |
| + |
| +#define FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE, COMPARISON) \ |
| + ((MODE == SFmode) || (MODE == DFmode)) |
| + |
| +#define RENAME_LIBRARY_SET ".set" |
| + |
| +/* Make ABI_NAME an alias for __GCC_NAME. */ |
| +#define RENAME_LIBRARY(GCC_NAME, ABI_NAME) \ |
| + __asm__ (".globl\t__avr32_" #ABI_NAME "\n" \ |
| + ".set\t__avr32_" #ABI_NAME \ |
| + ", __" #GCC_NAME "\n"); |
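| + |
| +/* For instance, RENAME_LIBRARY (muldi3, mul64) expands to |
| + |
| +     __asm__ (".globl\t__avr32_mul64\n" |
| +              ".set\t__avr32_mul64, __muldi3\n"); |
| + |
| +   making __avr32_mul64 a global alias for libgcc's __muldi3, as used for |
| +   L_muldi3 below. */ |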
| + |
| +/* Give libgcc functions avr32 ABI name. */ |
| +#ifdef L_muldi3 |
| +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (muldi3, mul64) |
| +#endif |
| +#ifdef L_divdi3 |
| +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (divdi3, sdiv64) |
| +#endif |
| +#ifdef L_udivdi3 |
| +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (udivdi3, udiv64) |
| +#endif |
| +#ifdef L_moddi3 |
| +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (moddi3, smod64) |
| +#endif |
| +#ifdef L_umoddi3 |
| +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (umoddi3, umod64) |
| +#endif |
| +#ifdef L_ashldi3 |
| +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashldi3, lsl64) |
| +#endif |
| +#ifdef L_lshrdi3 |
| +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (lshrdi3, lsr64) |
| +#endif |
| +#ifdef L_ashrdi3 |
| +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashrdi3, asr64) |
| +#endif |
| + |
| +#ifdef L_fixsfdi |
| +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixsfdi, f32_to_s64) |
| +#endif |
| +#ifdef L_fixunssfdi |
| +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunssfdi, f32_to_u64) |
| +#endif |
| +#ifdef L_floatdidf |
| +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdidf, s64_to_f64) |
| +#endif |
| +#ifdef L_floatdisf |
| +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdisf, s64_to_f32) |
| +#endif |
| + |
| +#endif |
| --- /dev/null |
| +++ b/gcc/config/avr32/avr32.md |
| @@ -0,0 +1,4893 @@ |
| +;; AVR32 machine description file. |
| +;; Copyright 2003-2006 Atmel Corporation. |
| +;; |
| +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| +;; |
| +;; This file is part of GCC. |
| +;; |
| +;; This program is free software; you can redistribute it and/or modify |
| +;; it under the terms of the GNU General Public License as published by |
| +;; the Free Software Foundation; either version 2 of the License, or |
| +;; (at your option) any later version. |
| +;; |
| +;; This program is distributed in the hope that it will be useful, |
| +;; but WITHOUT ANY WARRANTY; without even the implied warranty of |
| +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| +;; GNU General Public License for more details. |
| +;; |
| +;; You should have received a copy of the GNU General Public License |
| +;; along with this program; if not, write to the Free Software |
| +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
| + |
| +;; -*- Mode: Scheme -*- |
| + |
| +(define_attr "type" "alu,alu2,alu_sat,mulhh,mulwh,mulww_w,mulww_d,div,machh_w,macww_w,macww_d,branch,call,load,load_rm,store,load2,load4,store2,store4,fmul,fcmps,fcmpd,fcast,fmv,fmvcpu,fldd,fstd,flds,fsts,fstm" |
| + (const_string "alu")) |
| + |
| + |
| +(define_attr "cc" "none,set_vncz,set_ncz,set_cz,set_z,set_z_if_not_v2,bld,compare,cmp_cond_insn,clobber,call_set,fpcompare,from_fpcc" |
| + (const_string "none")) |
| + |
| + |
| +; NB! Keep this in sync with enum architecture_type in avr32.h |
| +(define_attr "pipeline" "ap,ucr1,ucr2,ucr2nomul" |
| + (const (symbol_ref "avr32_arch->arch_type"))) |
| + |
| +; Insn length in bytes |
| +(define_attr "length" "" |
| + (const_int 4)) |
| + |
| +; Signal if an insn is predicable and hence can be conditionally executed. |
| +(define_attr "predicable" "no,yes" (const_string "no")) |
| + |
| +;; Uses of UNSPEC in this file: |
| +(define_constants |
| + [(UNSPEC_PUSHM 0) |
| + (UNSPEC_POPM 1) |
| + (UNSPEC_UDIVMODSI4_INTERNAL 2) |
| + (UNSPEC_DIVMODSI4_INTERNAL 3) |
| + (UNSPEC_STM 4) |
| + (UNSPEC_LDM 5) |
| + (UNSPEC_MOVSICC 6) |
| + (UNSPEC_ADDSICC 7) |
| + (UNSPEC_COND_MI 8) |
| + (UNSPEC_COND_PL 9) |
| + (UNSPEC_PIC_SYM 10) |
| + (UNSPEC_PIC_BASE 11) |
| + (UNSPEC_STORE_MULTIPLE 12) |
| + (UNSPEC_STMFP 13) |
| + (UNSPEC_FPCC_TO_REG 14) |
| + (UNSPEC_REG_TO_CC 15) |
| + (UNSPEC_FORCE_MINIPOOL 16) |
| + (UNSPEC_SATS 17) |
| + (UNSPEC_SATU 18) |
| + (UNSPEC_SATRNDS 19) |
| + (UNSPEC_SATRNDU 20) |
| + ]) |
| + |
| +(define_constants |
| + [(VUNSPEC_EPILOGUE 0) |
| + (VUNSPEC_CACHE 1) |
| + (VUNSPEC_MTSR 2) |
| + (VUNSPEC_MFSR 3) |
| + (VUNSPEC_BLOCKAGE 4) |
| + (VUNSPEC_SYNC 5) |
| + (VUNSPEC_TLBR 6) |
| + (VUNSPEC_TLBW 7) |
| + (VUNSPEC_TLBS 8) |
| + (VUNSPEC_BREAKPOINT 9) |
| + (VUNSPEC_MTDR 10) |
| + (VUNSPEC_MFDR 11) |
| + (VUNSPEC_MVCR 12) |
| + (VUNSPEC_MVRC 13) |
| + (VUNSPEC_COP 14) |
| + (VUNSPEC_ALIGN 15) |
| + (VUNSPEC_POOL_START 16) |
| + (VUNSPEC_POOL_END 17) |
| + (VUNSPEC_POOL_4 18) |
| + (VUNSPEC_POOL_8 19) |
| + (VUNSPEC_POOL_16 20) |
| + (VUNSPEC_MUSFR 21) |
| + (VUNSPEC_MUSTR 22) |
| + (VUNSPEC_SYNC_CMPXCHG 23) |
| + (VUNSPEC_SYNC_SET_LOCK_AND_LOAD 24) |
| + (VUNSPEC_SYNC_STORE_IF_LOCK 25) |
| + (VUNSPEC_EH_RETURN 26) |
| + (VUNSPEC_FRS 27) |
| + (VUNSPEC_CSRF 28) |
| + (VUNSPEC_SSRF 29) |
| + ]) |
| + |
| +(define_constants |
| + [ |
| + ;; R7 = 15-7 = 8 |
| + (FP_REGNUM 8) |
| + ;; Return Register = R12 = 15 - 12 = 3 |
| + (RETVAL_REGNUM 3) |
| + ;; SP = R13 = 15 - 13 = 2 |
| + (SP_REGNUM 2) |
| + ;; LR = R14 = 15 - 14 = 1 |
| + (LR_REGNUM 1) |
| + ;; PC = R15 = 15 - 15 = 0 |
| + (PC_REGNUM 0) |
| + ;; FPSR = GENERAL_REGS + 1 = 17 |
| + (FPCC_REGNUM 17) |
| + ]) |
| + |
| + |
| + |
| + |
| +;;****************************************************************************** |
| +;; Macros |
| +;;****************************************************************************** |
| + |
| +;; Integer Modes for basic alu insns |
| +(define_mode_macro INTM [SI HI QI]) |
| +(define_mode_attr alu_cc_attr [(SI "set_vncz") (HI "clobber") (QI "clobber")]) |
| + |
| +;; Move word modes |
| +(define_mode_macro MOVM [SI V2HI V4QI]) |
| + |
| +;; For mov/addcc insns |
| +(define_mode_macro ADDCC [SI HI QI]) |
| +(define_mode_macro MOVCC [SF SI HI QI]) |
| +(define_mode_macro CMP [DI SI HI QI]) |
| +(define_mode_attr store_postfix [(SF ".w") (SI ".w") (HI ".h") (QI ".b")]) |
| +(define_mode_attr load_postfix [(SF ".w") (SI ".w") (HI ".sh") (QI ".ub")]) |
| +(define_mode_attr load_postfix_s [(SI ".w") (HI ".sh") (QI ".sb")]) |
| +(define_mode_attr load_postfix_u [(SI ".w") (HI ".uh") (QI ".ub")]) |
| +(define_mode_attr pred_mem_constraint [(SF "RKu11") (SI "RKu11") (HI "RKu10") (QI "RKu09")]) |
| +(define_mode_attr cmp_constraint [(DI "rKu20") (SI "rKs21") (HI "r") (QI "r")]) |
| +(define_mode_attr cmp_predicate [(DI "register_immediate_operand") |
| + (SI "register_const_int_operand") |
| + (HI "register_operand") |
| + (QI "register_operand")]) |
| +(define_mode_attr cmp_length [(DI "6") |
| + (SI "4") |
| + (HI "4") |
| + (QI "4")]) |
| + |
| +;; For all conditional insns |
| +(define_code_macro any_cond [eq ne gt ge lt le gtu geu ltu leu]) |
| +(define_code_attr cond [(eq "eq") (ne "ne") (gt "gt") (ge "ge") (lt "lt") (le "le") |
| + (gtu "hi") (geu "hs") (ltu "lo") (leu "ls")]) |
| +(define_code_attr invcond [(eq "ne") (ne "eq") (gt "le") (ge "lt") (lt "ge") (le "gt") |
| + (gtu "ls") (geu "lo") (ltu "hs") (leu "hi")]) |
| + |
| +;; For logical operations |
| +(define_code_macro logical [and ior xor]) |
| +(define_code_attr logical_insn [(and "and") (ior "or") (xor "eor")]) |
| + |
| +;; Predicable operations with three register operands |
| +(define_code_macro predicable_op3 [and ior xor plus minus]) |
| +(define_code_attr predicable_insn3 [(and "and") (ior "or") (xor "eor") (plus "add") (minus "sub")]) |
| +(define_code_attr predicable_commutative3 [(and "%") (ior "%") (xor "%") (plus "%") (minus "")]) |
| + |
| +;; Load the predicates |
| +(include "predicates.md") |
| + |
| + |
| +;;****************************************************************************** |
| +;; Automaton pipeline description for avr32 |
| +;;****************************************************************************** |
| + |
| +(define_automaton "avr32_ap") |
| + |
| + |
| +(define_cpu_unit "is" "avr32_ap") |
| +(define_cpu_unit "a1,m1,da" "avr32_ap") |
| +(define_cpu_unit "a2,m2,d" "avr32_ap") |
| + |
| +;;Alu instructions |
| +(define_insn_reservation "alu_op" 1 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "alu")) |
| + "is,a1,a2") |
| + |
| +(define_insn_reservation "alu2_op" 2 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "alu2")) |
| + "is,is+a1,a1+a2,a2") |
| + |
| +(define_insn_reservation "alu_sat_op" 2 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "alu_sat")) |
| + "is,a1,a2") |
| + |
| + |
| +;;Mul instructions |
| +(define_insn_reservation "mulhh_op" 2 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "mulhh,mulwh")) |
| + "is,m1,m2") |
| + |
| +(define_insn_reservation "mulww_w_op" 3 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "mulww_w")) |
| + "is,m1,m1+m2,m2") |
| + |
| +(define_insn_reservation "mulww_d_op" 5 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "mulww_d")) |
| + "is,m1,m1+m2,m1+m2,m2,m2") |
| + |
| +(define_insn_reservation "div_op" 33 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "div")) |
| + "is,m1,m1*31 + m2*31,m2") |
| + |
| +(define_insn_reservation "machh_w_op" 3 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "machh_w")) |
| + "is*2,m1,m2") |
| + |
| + |
| +(define_insn_reservation "macww_w_op" 4 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "macww_w")) |
| + "is*2,m1,m1,m2") |
| + |
| + |
| +(define_insn_reservation "macww_d_op" 6 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "macww_d")) |
| + "is*2,m1,m1+m2,m1+m2,m2") |
| + |
| +;;Bypasses for MAC instructions, needed because of the accumulator cache. |
| +;;Keep the latency as low as possible so that the compiler can place |
| +;;mul -> mac and mac -> mac combinations which use the same accumulator |
| +;;cache close together, and no instruction which would ruin the |
| +;;accumulator cache is scheduled in between them. |
| +(define_bypass 4 "machh_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass") |
| +(define_bypass 5 "macww_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass") |
| +(define_bypass 7 "macww_d_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass") |
| + |
| +(define_bypass 3 "mulhh_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass") |
| +(define_bypass 4 "mulww_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass") |
| +(define_bypass 6 "mulww_d_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass") |
| + |
| + |
| +;;Bypasses for all mul/mac instructions followed by an instruction |
| +;;which reads the output AND writes the result to the same register. |
| +;;This will generate a Write After Write hazard which gives an |
| +;;extra cycle before the result is ready. |
| +(define_bypass 0 "machh_w_op" "machh_w_op" "avr32_valid_macmac_bypass") |
| +(define_bypass 0 "macww_w_op" "macww_w_op" "avr32_valid_macmac_bypass") |
| +(define_bypass 0 "macww_d_op" "macww_d_op" "avr32_valid_macmac_bypass") |
| + |
| +(define_bypass 0 "mulhh_op" "machh_w_op" "avr32_valid_mulmac_bypass") |
| +(define_bypass 0 "mulww_w_op" "macww_w_op" "avr32_valid_mulmac_bypass") |
| +(define_bypass 0 "mulww_d_op" "macww_d_op" "avr32_valid_mulmac_bypass") |
| + |
| +;;Branch and call instructions |
| +;;We assume that all branches and rcalls are predicted correctly :-) |
| +;;while calls use a lot of cycles. |
| +(define_insn_reservation "branch_op" 0 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "branch")) |
| + "nothing") |
| + |
| +(define_insn_reservation "call_op" 10 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "call")) |
| + "nothing") |
| + |
| + |
| +;;Load store instructions |
| +(define_insn_reservation "load_op" 2 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "load")) |
| + "is,da,d") |
| + |
| +(define_insn_reservation "load_rm_op" 3 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "load_rm")) |
| + "is,da,d") |
| + |
| + |
| +(define_insn_reservation "store_op" 0 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "store")) |
| + "is,da,d") |
| + |
| + |
| +(define_insn_reservation "load_double_op" 3 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "load2")) |
| + "is,da,da+d,d") |
| + |
| +(define_insn_reservation "load_quad_op" 4 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "load4")) |
| + "is,da,da+d,da+d,d") |
| + |
| +(define_insn_reservation "store_double_op" 0 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "store2")) |
| + "is,da,da+d,d") |
| + |
| + |
| +(define_insn_reservation "store_quad_op" 0 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "store4")) |
| + "is,da,da+d,da+d,d") |
| + |
| +;;For a store, the operand to write to memory is read in the d stage, so |
| +;;the real latency between any instruction and a store is one less than |
| +;;for instructions which read their operands in the first execution stage. |
| +(define_bypass 2 "load_double_op" "store_double_op" "avr32_store_bypass") |
| +(define_bypass 3 "load_quad_op" "store_quad_op" "avr32_store_bypass") |
| +(define_bypass 1 "load_op" "store_op" "avr32_store_bypass") |
| +(define_bypass 2 "load_rm_op" "store_op" "avr32_store_bypass") |
| +(define_bypass 1 "alu_sat_op" "store_op" "avr32_store_bypass") |
| +(define_bypass 1 "alu2_op" "store_op" "avr32_store_bypass") |
| +(define_bypass 1 "mulhh_op" "store_op" "avr32_store_bypass") |
| +(define_bypass 2 "mulww_w_op" "store_op" "avr32_store_bypass") |
| +(define_bypass 4 "mulww_d_op" "store_op" "avr32_store_bypass" ) |
| +(define_bypass 2 "machh_w_op" "store_op" "avr32_store_bypass") |
| +(define_bypass 3 "macww_w_op" "store_op" "avr32_store_bypass") |
| +(define_bypass 5 "macww_d_op" "store_op" "avr32_store_bypass") |
| + |
| + |
| +; Bypass for load double operation. If only the first loaded word is needed |
| +; then the latency is 2 |
| +(define_bypass 2 "load_double_op" |
| + "load_op,load_rm_op,alu_sat_op, alu2_op, alu_op, mulhh_op, mulww_w_op, |
| + mulww_d_op, machh_w_op, macww_w_op, macww_d_op" |
| + "avr32_valid_load_double_bypass") |
| + |
| +; Bypass for load quad operation. If only the first or second loaded word is needed |
| +; we set the latency to 2 |
| +(define_bypass 2 "load_quad_op" |
| + "load_op,load_rm_op,alu_sat_op, alu2_op, alu_op, mulhh_op, mulww_w_op, |
| + mulww_d_op, machh_w_op, macww_w_op, macww_d_op" |
| + "avr32_valid_load_quad_bypass") |
| + |
| + |
| +;;****************************************************************************** |
| +;; End of Automaton pipeline description for avr32 |
| +;;****************************************************************************** |
| + |
| +(define_cond_exec |
| + [(match_operator 0 "avr32_comparison_operator" |
| + [(match_operand:CMP 1 "register_operand" "r") |
| + (match_operand:CMP 2 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")])] |
| + "TARGET_V2_INSNS" |
| + "%!" |
| +) |
| + |
| +(define_cond_exec |
| + [(match_operator 0 "avr32_comparison_operator" |
| + [(and:SI (match_operand:SI 1 "register_operand" "r") |
| + (match_operand:SI 2 "one_bit_set_operand" "i")) |
| + (const_int 0)])] |
| + "TARGET_V2_INSNS" |
| + "%!" |
| + ) |
| + |
| +;;============================================================================= |
| +;; move |
| +;;----------------------------------------------------------------------------- |
| + |
| + |
| +;;== char - 8 bits ============================================================ |
| +(define_expand "movqi" |
| + [(set (match_operand:QI 0 "nonimmediate_operand" "") |
| + (match_operand:QI 1 "general_operand" ""))] |
| + "" |
| + { |
| + if ( !no_new_pseudos ){ |
| + if (GET_CODE (operands[1]) == MEM && optimize){ |
| + rtx reg = gen_reg_rtx (SImode); |
| + |
| + emit_insn (gen_zero_extendqisi2 (reg, operands[1])); |
| + operands[1] = gen_lowpart (QImode, reg); |
| + } |
| + |
| + /* One of the ops has to be in a register. */ |
| + if (GET_CODE (operands[0]) == MEM) |
| + operands[1] = force_reg (QImode, operands[1]); |
| + } |
| + |
| + }) |
| + |
| +(define_insn "*movqi_internal" |
| + [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m,r") |
| + (match_operand:QI 1 "general_operand" "rKs08,m,r,i"))] |
| + "register_operand (operands[0], QImode) |
| + || register_operand (operands[1], QImode)" |
| + "@ |
| + mov\t%0, %1 |
| + ld.ub\t%0, %1 |
| + st.b\t%0, %1 |
| + mov\t%0, %1" |
| + [(set_attr "length" "2,4,4,4") |
| + (set_attr "type" "alu,load_rm,store,alu")]) |
| + |
| + |
| + |
| +;;== short - 16 bits ========================================================== |
| +(define_expand "movhi" |
| + [(set (match_operand:HI 0 "nonimmediate_operand" "") |
| + (match_operand:HI 1 "general_operand" ""))] |
| + "" |
| + { |
| + if ( !no_new_pseudos ){ |
| + if (GET_CODE (operands[1]) == MEM && optimize){ |
| + rtx reg = gen_reg_rtx (SImode); |
| + |
| + emit_insn (gen_extendhisi2 (reg, operands[1])); |
| + operands[1] = gen_lowpart (HImode, reg); |
| + } |
| + |
| + /* One of the ops has to be in a register. */ |
| + if (GET_CODE (operands[0]) == MEM) |
| + operands[1] = force_reg (HImode, operands[1]); |
| + } |
| + |
| + }) |
| + |
| + |
| +(define_insn "*movhi_internal" |
| + [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r") |
| + (match_operand:HI 1 "general_operand" "rKs08,m,r,i"))] |
| + "register_operand (operands[0], HImode) |
| + || register_operand (operands[1], HImode)" |
| + "@ |
| + mov\t%0, %1 |
| + ld.sh\t%0, %1 |
| + st.h\t%0, %1 |
| + mov\t%0, %1" |
| + [(set_attr "length" "2,4,4,4") |
| + (set_attr "type" "alu,load_rm,store,alu")]) |
| + |
| + |
| +;;== int - 32 bits ============================================================ |
| + |
| +(define_expand "movmisalignsi" |
| + [(set (match_operand:SI 0 "nonimmediate_operand" "") |
| + (match_operand:SI 1 "nonimmediate_operand" ""))] |
| + "TARGET_UNALIGNED_WORD" |
| + { |
| + } |
| +) |
| + |
| + |
| +(define_expand "mov<mode>" |
| + [(set (match_operand:MOVM 0 "avr32_non_rmw_nonimmediate_operand" "") |
| + (match_operand:MOVM 1 "avr32_non_rmw_general_operand" ""))] |
| + "" |
| + { |
| + |
| + /* One of the ops has to be in a register. */ |
| + if (GET_CODE (operands[0]) == MEM) |
| + operands[1] = force_reg (<MODE>mode, operands[1]); |
| + |
| + |
| +    /* Check for out-of-range immediate constants, as these may
| +       occur during reloading; reload does not seem to check whether
| +       the immediate is legitimate (possibly a bug?). */
| + if ( reload_in_progress |
| + && avr32_imm_in_const_pool |
| + && GET_CODE(operands[1]) == CONST_INT |
| + && !avr32_const_ok_for_constraint_p(INTVAL(operands[1]), 'K', "Ks21") ){ |
| + operands[1] = force_const_mem(SImode, operands[1]); |
| + } |
| + |
| +    /* Check for RMW memory operands. They are not allowed for mov operations,
| +       only for the atomic memc/s/t operations. */
| + if ( !reload_in_progress |
| + && avr32_rmw_memory_operand (operands[0], <MODE>mode) ){ |
| + operands[0] = copy_rtx (operands[0]); |
| + XEXP(operands[0], 0) = force_reg (<MODE>mode, XEXP(operands[0], 0)); |
| + } |
| + |
| + if ( !reload_in_progress |
| + && avr32_rmw_memory_operand (operands[1], <MODE>mode) ){ |
| + operands[1] = copy_rtx (operands[1]); |
| + XEXP(operands[1], 0) = force_reg (<MODE>mode, XEXP(operands[1], 0)); |
| + } |
| + |
| + if ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS) |
| + && !avr32_legitimate_pic_operand_p(operands[1]) ) |
| + operands[1] = legitimize_pic_address (operands[1], <MODE>mode, |
| + (no_new_pseudos ? operands[0] : 0)); |
| + else if ( flag_pic && avr32_address_operand(operands[1], GET_MODE(operands[1])) ) |
| + /* If we have an address operand then this function uses the pic register. */ |
| + current_function_uses_pic_offset_table = 1; |
| + }) |
| + |
| + |
| + |
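| +;; Word move alternatives: register moves and small immediates, wide
| +;; constants built with movh or mov/orh, loads (lddsp, lddpc or ld.w),
| +;; stores (stdsp or st.w), and symbol addresses via lda.w or a GOT load.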
| +(define_insn "mov<mode>_internal" |
| + [(set (match_operand:MOVM 0 "avr32_non_rmw_nonimmediate_operand" "=r, r, r,r,r,Q,r") |
| + (match_operand:MOVM 1 "avr32_non_rmw_general_operand" "rKs08,Ks21,J,n,Q,r,W"))] |
| + "(register_operand (operands[0], <MODE>mode) |
| + || register_operand (operands[1], <MODE>mode)) |
| + && !avr32_rmw_memory_operand (operands[0], <MODE>mode) |
| + && !avr32_rmw_memory_operand (operands[1], <MODE>mode)" |
| + { |
| + switch (which_alternative) { |
| + case 0: |
| + case 1: return "mov\t%0, %1"; |
| + case 2: |
| + if ( TARGET_V2_INSNS ) |
| + return "movh\t%0, hi(%1)"; |
| + /* Fallthrough */ |
| + case 3: return "mov\t%0, lo(%1)\;orh\t%0,hi(%1)"; |
| + case 4: |
| + if ( (REG_P(XEXP(operands[1], 0)) |
| + && REGNO(XEXP(operands[1], 0)) == SP_REGNUM) |
| + || (GET_CODE(XEXP(operands[1], 0)) == PLUS |
| + && REGNO(XEXP(XEXP(operands[1], 0), 0)) == SP_REGNUM |
| + && GET_CODE(XEXP(XEXP(operands[1], 0), 1)) == CONST_INT |
| + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) % 4 == 0 |
| + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) <= 0x1FC) ) |
| + return "lddsp\t%0, %1"; |
| + else if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])) ) |
| + return "lddpc\t%0, %1"; |
| + else |
| + return "ld.w\t%0, %1"; |
| + case 5: |
| + if ( (REG_P(XEXP(operands[0], 0)) |
| + && REGNO(XEXP(operands[0], 0)) == SP_REGNUM) |
| + || (GET_CODE(XEXP(operands[0], 0)) == PLUS |
| + && REGNO(XEXP(XEXP(operands[0], 0), 0)) == SP_REGNUM |
| + && GET_CODE(XEXP(XEXP(operands[0], 0), 1)) == CONST_INT |
| + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) % 4 == 0 |
| + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) <= 0x1FC) ) |
| + return "stdsp\t%0, %1"; |
| + else |
| + return "st.w\t%0, %1"; |
| + case 6: |
| + if ( TARGET_HAS_ASM_ADDR_PSEUDOS ) |
| + return "lda.w\t%0, %1"; |
| + else |
| + return "ld.w\t%0, r6[%1@got]"; |
| + default: |
| + abort(); |
| + } |
| + } |
| + |
| + [(set_attr "length" "2,4,4,8,4,4,8") |
| + (set_attr "type" "alu,alu,alu,alu2,load,store,load") |
| + (set_attr "cc" "none,none,set_z_if_not_v2,set_z,none,none,clobber")]) |
| + |
| + |
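| +;; Reload helpers for RMW memory operands: the address is first copied into
| +;; the scratch register (operand 2) and the store/load then goes through it.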
| +(define_expand "reload_out_rmw_memory_operand" |
| + [(set (match_operand:SI 2 "register_operand" "=r") |
| + (match_operand:SI 0 "address_operand" "")) |
| + (set (mem:SI (match_dup 2)) |
| + (match_operand:SI 1 "register_operand" ""))] |
| + "" |
| + { |
| + operands[0] = XEXP(operands[0], 0); |
| + } |
| +) |
| + |
| +(define_expand "reload_in_rmw_memory_operand" |
| + [(set (match_operand:SI 2 "register_operand" "=r") |
| + (match_operand:SI 1 "address_operand" "")) |
| + (set (match_operand:SI 0 "register_operand" "") |
| + (mem:SI (match_dup 2)))] |
| + "" |
| + { |
| + operands[1] = XEXP(operands[1], 0); |
| + } |
| +) |
| + |
| + |
| +;; These instructions are for loading constants which cannot be loaded
| +;; directly from the constant pool because the offset is too large.
| +;; 'high' and 'lo_sum' are used even though, in our case, it really should
| +;; be 'low' and 'high_sum' :-)
| +(define_insn "mov_symbol_lo" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (high:SI (match_operand:SI 1 "immediate_operand" "i" )))] |
| + "" |
| + "mov\t%0, lo(%1)" |
| + [(set_attr "type" "alu") |
| + (set_attr "length" "4")] |
| +) |
| + |
| +(define_insn "add_symbol_hi" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (lo_sum:SI (match_dup 0) |
| + (match_operand:SI 1 "immediate_operand" "i" )))] |
| + "" |
| + "orh\t%0, hi(%1)" |
| + [(set_attr "type" "alu") |
| + (set_attr "length" "4")] |
| +) |
| + |
| + |
| + |
| +;; When generating pic, we need to load the symbol offset into a register. |
| +;; So that the optimizer does not confuse this with a normal symbol load |
| +;; we use an unspec. The offset will be loaded from a constant pool entry, |
| +;; since that is the only type of relocation we can use. |
| +(define_insn "pic_load_addr" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (unspec:SI [(match_operand:SI 1 "" "")] UNSPEC_PIC_SYM))] |
| + "flag_pic && CONSTANT_POOL_ADDRESS_P(XEXP(operands[1], 0))" |
| + "lddpc\t%0, %1" |
| + [(set_attr "type" "load") |
| + (set_attr "length" "4")] |
| +) |
| + |
| +(define_insn "pic_compute_got_from_pc" |
| + [(set (match_operand:SI 0 "register_operand" "+r") |
| + (unspec:SI [(minus:SI (pc) |
| + (match_dup 0))] UNSPEC_PIC_BASE)) |
| + (use (label_ref (match_operand 1 "" "")))] |
| + "flag_pic" |
| + { |
| + (*targetm.asm_out.internal_label) (asm_out_file, "L", |
| + CODE_LABEL_NUMBER (operands[1])); |
| + return \"rsub\t%0, pc\"; |
| + } |
| + [(set_attr "cc" "clobber") |
| + (set_attr "length" "2")] |
| +) |
| + |
| +;;== long long int - 64 bits ================================================== |
| + |
| +(define_expand "movdi" |
| + [(set (match_operand:DI 0 "nonimmediate_operand" "") |
| + (match_operand:DI 1 "general_operand" ""))] |
| + "" |
| + { |
| + |
| + /* One of the ops has to be in a register. */ |
| + if (GET_CODE (operands[0]) != REG) |
| + operands[1] = force_reg (DImode, operands[1]); |
| + |
| + }) |
| + |
| + |
| +(define_insn_and_split "*movdi_internal" |
| + [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r, r, r,r,r,m") |
| + (match_operand:DI 1 "general_operand" "r, Ks08,Ks21,G,n,m,r"))] |
| + "register_operand (operands[0], DImode) |
| + || register_operand (operands[1], DImode)" |
| + { |
| + switch (which_alternative ){ |
| + case 0: |
| + case 1: |
| + case 2: |
| + case 3: |
| + case 4: |
| + return "#"; |
| + case 5: |
| + if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1]))) |
| + return "ld.d\t%0, pc[%1 - .]"; |
| + else |
| + return "ld.d\t%0, %1"; |
| + case 6: |
| + return "st.d\t%0, %1"; |
| + default: |
| + abort(); |
| + } |
| + } |
| +;; Let's split all reg->reg or imm->reg transfers into two SImode transfers.
| + "reload_completed && |
| + (REG_P (operands[0]) && |
| + (REG_P (operands[1]) |
| + || GET_CODE (operands[1]) == CONST_INT |
| + || GET_CODE (operands[1]) == CONST_DOUBLE))" |
| + [(set (match_dup 0) (match_dup 1)) |
| + (set (match_dup 2) (match_dup 3))] |
| + { |
| + operands[2] = gen_highpart (SImode, operands[0]); |
| + operands[0] = gen_lowpart (SImode, operands[0]); |
| + if ( REG_P(operands[1]) ){ |
| + operands[3] = gen_highpart(SImode, operands[1]); |
| + operands[1] = gen_lowpart(SImode, operands[1]); |
| + } else if ( GET_CODE(operands[1]) == CONST_DOUBLE |
| + || GET_CODE(operands[1]) == CONST_INT ){ |
| + rtx split_const[2]; |
| + avr32_split_const_expr (DImode, SImode, operands[1], split_const); |
| + operands[3] = split_const[1]; |
| + operands[1] = split_const[0]; |
| + } else { |
| + internal_error("Illegal operand[1] for movdi split!"); |
| + } |
| + } |
| + |
| + [(set_attr "length" "*,*,*,*,*,4,4") |
| + (set_attr "type" "*,*,*,*,*,load2,store2") |
| + (set_attr "cc" "*,*,*,*,*,none,none")]) |
| + |
| + |
| +;;== 128 bits ================================================== |
| +(define_expand "movti" |
| + [(set (match_operand:TI 0 "nonimmediate_operand" "") |
| + (match_operand:TI 1 "nonimmediate_operand" ""))] |
| + "TARGET_ARCH_AP" |
| + { |
| + |
| + /* One of the ops has to be in a register. */ |
| + if (GET_CODE (operands[0]) != REG) |
| + operands[1] = force_reg (TImode, operands[1]); |
| + |
| +    /* We must fix up any pre_dec loads and post_inc stores. */
| + if ( GET_CODE (operands[0]) == MEM |
| + && GET_CODE (XEXP(operands[0],0)) == POST_INC ){ |
| + emit_move_insn(gen_rtx_MEM(TImode, XEXP(XEXP(operands[0],0),0)), operands[1]); |
| + emit_insn(gen_addsi3(XEXP(XEXP(operands[0],0),0), XEXP(XEXP(operands[0],0),0), GEN_INT(GET_MODE_SIZE(TImode)))); |
| + DONE; |
| + } |
| + |
| + if ( GET_CODE (operands[1]) == MEM |
| + && GET_CODE (XEXP(operands[1],0)) == PRE_DEC ){ |
| + emit_insn(gen_addsi3(XEXP(XEXP(operands[1],0),0), XEXP(XEXP(operands[1],0),0), GEN_INT(-GET_MODE_SIZE(TImode)))); |
| + emit_move_insn(operands[0], gen_rtx_MEM(TImode, XEXP(XEXP(operands[1],0),0))); |
| + DONE; |
| + } |
| + }) |
| + |
| + |
| +(define_insn_and_split "*movti_internal" |
| + [(set (match_operand:TI 0 "avr32_movti_dst_operand" "=r,&r, r, <RKu00,r,r") |
| + (match_operand:TI 1 "avr32_movti_src_operand" " r,RKu00>,RKu00,r, n,T"))] |
| + "(register_operand (operands[0], TImode) |
| + || register_operand (operands[1], TImode))" |
| + { |
| + switch (which_alternative ){ |
| + case 0: |
| + case 2: |
| + case 4: |
| + return "#"; |
| + case 1: |
| + return "ldm\t%p1, %0"; |
| + case 3: |
| + return "stm\t%p0, %1"; |
| + case 5: |
| + return "ld.d\t%U0, pc[%1 - .]\;ld.d\t%B0, pc[%1 - . + 8]"; |
| + } |
| + } |
| + |
| + "reload_completed && |
| + (REG_P (operands[0]) && |
| + (REG_P (operands[1]) |
| + /* If this is a load from the constant pool we split it into |
| + two double loads. */ |
| + || (GET_CODE (operands[1]) == MEM |
| + && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF |
| + && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0))) |
| + /* If this is a load where the pointer register is a part |
| + of the register list, we must split it into two double |
| + loads in order for it to be exception safe. */ |
| + || (GET_CODE (operands[1]) == MEM |
| + && register_operand (XEXP (operands[1], 0), SImode) |
| + && reg_overlap_mentioned_p (operands[0], XEXP (operands[1], 0))) |
| + || GET_CODE (operands[1]) == CONST_INT |
| + || GET_CODE (operands[1]) == CONST_DOUBLE))" |
| + [(set (match_dup 0) (match_dup 1)) |
| + (set (match_dup 2) (match_dup 3))] |
| + { |
| + operands[2] = simplify_gen_subreg ( DImode, operands[0], |
| + TImode, 0 ); |
| + operands[0] = simplify_gen_subreg ( DImode, operands[0], |
| + TImode, 8 ); |
| + if ( REG_P(operands[1]) ){ |
| + operands[3] = simplify_gen_subreg ( DImode, operands[1], |
| + TImode, 0 ); |
| + operands[1] = simplify_gen_subreg ( DImode, operands[1], |
| + TImode, 8 ); |
| + } else if ( GET_CODE(operands[1]) == CONST_DOUBLE |
| + || GET_CODE(operands[1]) == CONST_INT ){ |
| + rtx split_const[2]; |
| + avr32_split_const_expr (TImode, DImode, operands[1], split_const); |
| + operands[3] = split_const[1]; |
| + operands[1] = split_const[0]; |
| + } else if (avr32_const_pool_ref_operand (operands[1], GET_MODE(operands[1]))){ |
| + rtx split_const[2]; |
| + rtx cop = avoid_constant_pool_reference (operands[1]); |
| + if (operands[1] == cop) |
| + cop = get_pool_constant (XEXP (operands[1], 0)); |
| + avr32_split_const_expr (TImode, DImode, cop, split_const); |
| + operands[3] = force_const_mem (DImode, split_const[1]); |
| + operands[1] = force_const_mem (DImode, split_const[0]); |
| + } else { |
| + rtx ptr_reg = XEXP (operands[1], 0); |
| + operands[1] = gen_rtx_MEM (DImode, |
| + gen_rtx_PLUS ( SImode, |
| + ptr_reg, |
| + GEN_INT (8) )); |
| + operands[3] = gen_rtx_MEM (DImode, |
| + ptr_reg); |
| + |
| + /* Check if the first load will clobber the pointer. |
| + If so, we must switch the order of the operations. */ |
| + if ( reg_overlap_mentioned_p (operands[0], ptr_reg) ) |
| + { |
| + /* We need to switch the order of the operations |
| + so that the pointer register does not get clobbered |
| + after the first double word load. */ |
| + rtx tmp; |
| + tmp = operands[0]; |
| + operands[0] = operands[2]; |
| + operands[2] = tmp; |
| + tmp = operands[1]; |
| + operands[1] = operands[3]; |
| + operands[3] = tmp; |
| + } |
| + |
| + |
| + } |
| + } |
| + [(set_attr "length" "*,*,4,4,*,8") |
| + (set_attr "type" "*,*,load4,store4,*,load4")]) |
| + |
| + |
| +;;== float - 32 bits ========================================================== |
| +(define_expand "movsf" |
| + [(set (match_operand:SF 0 "nonimmediate_operand" "") |
| + (match_operand:SF 1 "general_operand" ""))] |
| + "" |
| + { |
| + |
| + |
| + /* One of the ops has to be in a register. */ |
| + if (GET_CODE (operands[0]) != REG) |
| + operands[1] = force_reg (SFmode, operands[1]); |
| + |
| + }) |
| + |
| +(define_insn "*movsf_internal" |
| + [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,r,r,m") |
| + (match_operand:SF 1 "general_operand" "r, G,F,m,r"))] |
| + "(register_operand (operands[0], SFmode) |
| + || register_operand (operands[1], SFmode))" |
| + { |
| + switch (which_alternative) { |
| + case 0: |
| + case 1: return "mov\t%0, %1"; |
| + case 2: |
| + { |
| + HOST_WIDE_INT target_float[2]; |
| + real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (operands[1]), SFmode); |
| + if ( TARGET_V2_INSNS |
| + && avr32_hi16_immediate_operand (GEN_INT (target_float[0]), VOIDmode) ) |
| + return "movh\t%0, hi(%1)"; |
| + else |
| + return "mov\t%0, lo(%1)\;orh\t%0, hi(%1)"; |
| + } |
| + case 3: |
| + if ( (REG_P(XEXP(operands[1], 0)) |
| + && REGNO(XEXP(operands[1], 0)) == SP_REGNUM) |
| + || (GET_CODE(XEXP(operands[1], 0)) == PLUS |
| + && REGNO(XEXP(XEXP(operands[1], 0), 0)) == SP_REGNUM |
| + && GET_CODE(XEXP(XEXP(operands[1], 0), 1)) == CONST_INT |
| + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) % 4 == 0 |
| + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) <= 0x1FC) ) |
| + return "lddsp\t%0, %1"; |
| + else if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])) ) |
| + return "lddpc\t%0, %1"; |
| + else |
| + return "ld.w\t%0, %1"; |
| + case 4: |
| + if ( (REG_P(XEXP(operands[0], 0)) |
| + && REGNO(XEXP(operands[0], 0)) == SP_REGNUM) |
| + || (GET_CODE(XEXP(operands[0], 0)) == PLUS |
| + && REGNO(XEXP(XEXP(operands[0], 0), 0)) == SP_REGNUM |
| + && GET_CODE(XEXP(XEXP(operands[0], 0), 1)) == CONST_INT |
| + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) % 4 == 0 |
| + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) <= 0x1FC) ) |
| + return "stdsp\t%0, %1"; |
| + else |
| + return "st.w\t%0, %1"; |
| + default: |
| + abort(); |
| + } |
| + } |
| + |
| + [(set_attr "length" "2,4,8,4,4") |
| + (set_attr "type" "alu,alu,alu2,load,store") |
| + (set_attr "cc" "none,none,clobber,none,none")]) |
| + |
| + |
| + |
| +;;== double - 64 bits ========================================================= |
| +(define_expand "movdf" |
| + [(set (match_operand:DF 0 "nonimmediate_operand" "") |
| + (match_operand:DF 1 "general_operand" ""))] |
| + "" |
| + { |
| + /* One of the ops has to be in a register. */ |
| + if (GET_CODE (operands[0]) != REG){ |
| + operands[1] = force_reg (DFmode, operands[1]); |
| + } |
| + }) |
| + |
| + |
| +(define_insn_and_split "*movdf_internal" |
| + [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,r,r,m") |
| + (match_operand:DF 1 "general_operand" " r,G,F,m,r"))] |
| + "TARGET_SOFT_FLOAT |
| + && (register_operand (operands[0], DFmode) |
| + || register_operand (operands[1], DFmode))" |
| + { |
| + switch (which_alternative ){ |
| + case 0: |
| + case 1: |
| + case 2: |
| + return "#"; |
| + case 3: |
| + if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1]))) |
| + return "ld.d\t%0, pc[%1 - .]"; |
| + else |
| + return "ld.d\t%0, %1"; |
| + case 4: |
| + return "st.d\t%0, %1"; |
| + default: |
| + abort(); |
| + } |
| + } |
| + "TARGET_SOFT_FLOAT |
| + && reload_completed |
| + && (REG_P (operands[0]) |
| + && (REG_P (operands[1]) |
| + || GET_CODE (operands[1]) == CONST_DOUBLE))" |
| + [(set (match_dup 0) (match_dup 1)) |
| + (set (match_dup 2) (match_dup 3))] |
| + " |
| + { |
| + operands[2] = gen_highpart (SImode, operands[0]); |
| + operands[0] = gen_lowpart (SImode, operands[0]); |
| + operands[3] = gen_highpart(SImode, operands[1]); |
| + operands[1] = gen_lowpart(SImode, operands[1]); |
| + } |
| + " |
| + |
| + [(set_attr "length" "*,*,*,4,4") |
| + (set_attr "type" "*,*,*,load2,store2") |
| + (set_attr "cc" "*,*,*,none,none")]) |
| + |
| + |
| +;;============================================================================= |
| +;; Conditional Moves |
| +;;============================================================================= |
| +(define_insn "ld<mode>_predicable" |
| + [(set (match_operand:MOVCC 0 "register_operand" "=r") |
| + (match_operand:MOVCC 1 "avr32_non_rmw_memory_operand" "<MOVCC:pred_mem_constraint>"))] |
| + "TARGET_V2_INSNS" |
| + "ld<MOVCC:load_postfix>%?\t%0, %1" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "cmp_cond_insn") |
| + (set_attr "type" "load") |
| + (set_attr "predicable" "yes")] |
| +) |
| + |
| + |
| +(define_insn "st<mode>_predicable" |
| + [(set (match_operand:MOVCC 0 "avr32_non_rmw_memory_operand" "=<MOVCC:pred_mem_constraint>") |
| + (match_operand:MOVCC 1 "register_operand" "r"))] |
| + "TARGET_V2_INSNS" |
| + "st<MOVCC:store_postfix>%?\t%0, %1" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "cmp_cond_insn") |
| + (set_attr "type" "store") |
| + (set_attr "predicable" "yes")] |
| +) |
| + |
| +(define_insn "mov<mode>_predicable" |
| + [(set (match_operand:MOVCC 0 "register_operand" "=r") |
| + (match_operand:MOVCC 1 "avr32_cond_register_immediate_operand" "rKs08"))] |
| + "" |
| + "mov%?\t%0, %1" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "cmp_cond_insn") |
| + (set_attr "type" "alu") |
| + (set_attr "predicable" "yes")] |
| +) |
| + |
| + |
| +;;============================================================================= |
| +;; Move chunks of memory |
| +;;============================================================================= |
| + |
| +(define_expand "movmemsi" |
| + [(match_operand:BLK 0 "general_operand" "") |
| + (match_operand:BLK 1 "general_operand" "") |
| + (match_operand:SI 2 "const_int_operand" "") |
| + (match_operand:SI 3 "const_int_operand" "")] |
| + "" |
| + " |
| + if (avr32_gen_movmemsi (operands)) |
| + DONE; |
| + FAIL; |
| + " |
| + ) |
| + |
| + |
| + |
| + |
| +;;============================================================================= |
| +;; Bit field instructions |
| +;;----------------------------------------------------------------------------- |
| +;; Instructions to insert or extract bit-fields |
| +;;============================================================================= |
| + |
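| +;; Bit-field insert/extract. Roughly, C bit-field accesses such as
| +;;   s.f = x;    /* may map to bfins */
| +;;   y = s.f;    /* may map to bfexts or bfextu */
| +;; can be handled by the patterns below.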
| +(define_insn "insv" |
| + [ (set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r") |
| + (match_operand:SI 1 "immediate_operand" "Ku05") |
| + (match_operand:SI 2 "immediate_operand" "Ku05")) |
| + (match_operand 3 "register_operand" "r"))] |
| + "" |
| + "bfins\t%0, %3, %2, %1" |
| + [(set_attr "type" "alu") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "set_ncz")]) |
| + |
| + |
| + |
| +(define_expand "extv" |
| + [ (set (match_operand:SI 0 "register_operand" "") |
| + (sign_extract:SI (match_operand:SI 1 "register_operand" "") |
| + (match_operand:SI 2 "immediate_operand" "") |
| + (match_operand:SI 3 "immediate_operand" "")))] |
| + "" |
| + { |
| + if ( INTVAL(operands[2]) >= 32 ) |
| + FAIL; |
| + } |
| +) |
| + |
| +(define_expand "extzv" |
| + [ (set (match_operand:SI 0 "register_operand" "") |
| + (zero_extract:SI (match_operand:SI 1 "register_operand" "") |
| + (match_operand:SI 2 "immediate_operand" "") |
| + (match_operand:SI 3 "immediate_operand" "")))] |
| + "" |
| + { |
| + if ( INTVAL(operands[2]) >= 32 ) |
| + FAIL; |
| + } |
| +) |
| + |
| +(define_insn "extv_internal" |
| + [ (set (match_operand:SI 0 "register_operand" "=r") |
| + (sign_extract:SI (match_operand:SI 1 "register_operand" "r") |
| + (match_operand:SI 2 "immediate_operand" "Ku05") |
| + (match_operand:SI 3 "immediate_operand" "Ku05")))] |
| + "INTVAL(operands[2]) < 32" |
| + "bfexts\t%0, %1, %3, %2" |
| + [(set_attr "type" "alu") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "set_ncz")]) |
| + |
| + |
| +(define_insn "extzv_internal" |
| + [ (set (match_operand:SI 0 "register_operand" "=r") |
| + (zero_extract:SI (match_operand:SI 1 "register_operand" "r") |
| + (match_operand:SI 2 "immediate_operand" "Ku05") |
| + (match_operand:SI 3 "immediate_operand" "Ku05")))] |
| + "INTVAL(operands[2]) < 32" |
| + "bfextu\t%0, %1, %3, %2" |
| + [(set_attr "type" "alu") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "set_ncz")]) |
| + |
| + |
| + |
| +;;============================================================================= |
| +;; Some peepholes for avoiding unnecessary cast instructions |
| +;; followed by bfins. |
| +;;----------------------------------------------------------------------------- |
| + |
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (zero_extend:SI (match_operand:QI 1 "register_operand" ""))) |
| + (set (zero_extract:SI (match_operand 2 "register_operand" "") |
| + (match_operand:SI 3 "immediate_operand" "") |
| + (match_operand:SI 4 "immediate_operand" "")) |
| + (match_dup 0))] |
| + "((peep2_reg_dead_p(2, operands[0]) && |
| + (INTVAL(operands[3]) <= 8)))" |
| + [(set (zero_extract:SI (match_dup 2) |
| + (match_dup 3) |
| + (match_dup 4)) |
| + (match_dup 1))] |
| + ) |
| + |
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (zero_extend:SI (match_operand:HI 1 "register_operand" ""))) |
| + (set (zero_extract:SI (match_operand 2 "register_operand" "") |
| + (match_operand:SI 3 "immediate_operand" "") |
| + (match_operand:SI 4 "immediate_operand" "")) |
| + (match_dup 0))] |
| + "((peep2_reg_dead_p(2, operands[0]) && |
| + (INTVAL(operands[3]) <= 16)))" |
| + [(set (zero_extract:SI (match_dup 2) |
| + (match_dup 3) |
| + (match_dup 4)) |
| + (match_dup 1))] |
| + ) |
| + |
| +;;============================================================================= |
| +;; push
| +;;----------------------------------------------------------------------------- |
| +;; Implements the pushm instruction.
| +;;============================================================================= |
| +(define_insn "pushm" |
| + [(set (mem:BLK (pre_dec:BLK (reg:SI SP_REGNUM))) |
| + (unspec:BLK [(match_operand 0 "const_int_operand" "")] |
| + UNSPEC_PUSHM))] |
| + "" |
| + { |
| + if (INTVAL(operands[0])) { |
| + return "pushm\t%r0"; |
| + } else { |
| + return ""; |
| + } |
| + } |
| + [(set_attr "type" "store") |
| + (set_attr "length" "2") |
| + (set_attr "cc" "none")]) |
| + |
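| +;; Store multiple: operand 0 is the base register, operand 1 the register
| +;; mask, and a nonzero operand 2 selects the pre-decrement ("stm --Rb") form.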
| +(define_insn "stm" |
| + [(unspec [(match_operand 0 "register_operand" "r") |
| + (match_operand 1 "const_int_operand" "") |
| + (match_operand 2 "const_int_operand" "")] |
| + UNSPEC_STM)] |
| + "" |
| + { |
| + if (INTVAL(operands[1])) { |
| + if (INTVAL(operands[2]) != 0) |
| + return "stm\t--%0, %s1"; |
| + else |
| + return "stm\t%0, %s1"; |
| + } else { |
| + return ""; |
| + } |
| + } |
| + [(set_attr "type" "store") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "none")]) |
| + |
| + |
| + |
| +(define_insn "popm" |
| + [(unspec [(match_operand 0 "const_int_operand" "")] |
| + UNSPEC_POPM)] |
| + "" |
| + { |
| + if (INTVAL(operands[0])) { |
| + return "popm %r0"; |
| + } else { |
| + return ""; |
| + } |
| + } |
| + [(set_attr "type" "load") |
| + (set_attr "length" "2")]) |
| + |
| + |
| + |
| +;;============================================================================= |
| +;; add |
| +;;----------------------------------------------------------------------------- |
| +;; Add reg1 and reg2 and put the result in reg0.
| +;;============================================================================= |
| +(define_insn "add<mode>3" |
| + [(set (match_operand:INTM 0 "register_operand" "=r,r,r,r,r") |
| + (plus:INTM (match_operand:INTM 1 "register_operand" "%0,r,0,r,0") |
| + (match_operand:INTM 2 "avr32_add_operand" "r,r,Is08,Is16,Is21")))] |
| + "" |
| + "@ |
| + add %0, %2 |
| + add %0, %1, %2 |
| + sub %0, %n2 |
| + sub %0, %1, %n2 |
| + sub %0, %n2" |
| + |
| + [(set_attr "length" "2,4,2,4,4") |
| + (set_attr "cc" "<INTM:alu_cc_attr>")]) |
| + |
| +(define_insn "add<mode>3_lsl" |
| + [(set (match_operand:INTM 0 "register_operand" "=r") |
| + (plus:INTM (ashift:INTM (match_operand:INTM 1 "register_operand" "r") |
| + (match_operand:INTM 3 "avr32_add_shift_immediate_operand" "Ku02")) |
| + (match_operand:INTM 2 "register_operand" "r")))] |
| + "" |
| + "add %0, %2, %1 << %3" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "<INTM:alu_cc_attr>")]) |
| + |
| +(define_insn "add<mode>3_lsl2" |
| + [(set (match_operand:INTM 0 "register_operand" "=r") |
| + (plus:INTM (match_operand:INTM 1 "register_operand" "r") |
| + (ashift:INTM (match_operand:INTM 2 "register_operand" "r") |
| + (match_operand:INTM 3 "avr32_add_shift_immediate_operand" "Ku02"))))] |
| + "" |
| + "add %0, %1, %2 << %3" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "<INTM:alu_cc_attr>")]) |
| + |
| + |
| +(define_insn "add<mode>3_mul" |
| + [(set (match_operand:INTM 0 "register_operand" "=r") |
| + (plus:INTM (mult:INTM (match_operand:INTM 1 "register_operand" "r") |
| + (match_operand:INTM 3 "immediate_operand" "Ku04" )) |
| + (match_operand:INTM 2 "register_operand" "r")))] |
| + "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) || |
| + (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)" |
| + "add %0, %2, %1 << %p3" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "<INTM:alu_cc_attr>")]) |
| + |
| +(define_insn "add<mode>3_mul2" |
| + [(set (match_operand:INTM 0 "register_operand" "=r") |
| + (plus:INTM (match_operand:INTM 1 "register_operand" "r") |
| + (mult:INTM (match_operand:INTM 2 "register_operand" "r") |
| + (match_operand:INTM 3 "immediate_operand" "Ku04" ))))] |
| + "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) || |
| + (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)" |
| + "add %0, %1, %2 << %p3" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "<INTM:alu_cc_attr>")]) |
| + |
| + |
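| +;; Fold a left shift by 1-3 followed by an add into a single
| +;; add-with-shifted-operand when the shifted intermediate is dead.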
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (ashift:SI (match_operand:SI 1 "register_operand" "") |
| + (match_operand:SI 2 "immediate_operand" ""))) |
| + (set (match_operand:SI 3 "register_operand" "") |
| + (plus:SI (match_dup 0) |
| + (match_operand:SI 4 "register_operand" "")))] |
| + "(peep2_reg_dead_p(2, operands[0]) && |
| + (INTVAL(operands[2]) < 4 && INTVAL(operands[2]) > 0))" |
| + [(set (match_dup 3) |
| + (plus:SI (ashift:SI (match_dup 1) |
| + (match_dup 2)) |
| + (match_dup 4)))] |
| + ) |
| + |
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (ashift:SI (match_operand:SI 1 "register_operand" "") |
| + (match_operand:SI 2 "immediate_operand" ""))) |
| + (set (match_operand:SI 3 "register_operand" "") |
| + (plus:SI (match_operand:SI 4 "register_operand" "") |
| + (match_dup 0)))] |
| + "(peep2_reg_dead_p(2, operands[0]) && |
| + (INTVAL(operands[2]) < 4 && INTVAL(operands[2]) > 0))" |
| + [(set (match_dup 3) |
| + (plus:SI (ashift:SI (match_dup 1) |
| + (match_dup 2)) |
| + (match_dup 4)))] |
| + ) |
| + |
| +(define_insn "adddi3" |
| + [(set (match_operand:DI 0 "register_operand" "=r,r") |
| + (plus:DI (match_operand:DI 1 "register_operand" "%0,r") |
| + (match_operand:DI 2 "register_operand" "r,r")))] |
| + "" |
| + "@ |
| + add %0, %2\;adc %m0, %m0, %m2 |
| + add %0, %1, %2\;adc %m0, %m1, %m2" |
| + [(set_attr "length" "6,8") |
| + (set_attr "type" "alu2") |
| + (set_attr "cc" "set_vncz")]) |
| + |
| + |
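| +;; Conditional add of a small immediate; the output template implements it
| +;; as a conditional sub of the negated immediate.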
| +(define_insn "add<mode>_imm_predicable" |
| + [(set (match_operand:INTM 0 "register_operand" "+r") |
| + (plus:INTM (match_dup 0) |
| + (match_operand:INTM 1 "avr32_cond_immediate_operand" "%Is08")))] |
| + "" |
| + "sub%?\t%0, -%1" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "cmp_cond_insn") |
| + (set_attr "predicable" "yes")] |
| +) |
| + |
| +;;============================================================================= |
| +;; subtract |
| +;;----------------------------------------------------------------------------- |
| +;; Subtract reg2 or an immediate value from reg1 and put the result in reg0.
| +;;============================================================================= |
| + |
| +(define_insn "sub<mode>3" |
| + [(set (match_operand:INTM 0 "general_operand" "=r,r,r,r,r,r,r") |
| + (minus:INTM (match_operand:INTM 1 "nonmemory_operand" "0,r,0,r,0,r,Ks08") |
| + (match_operand:INTM 2 "nonmemory_operand" "r,r,Ks08,Ks16,Ks21,0,r")))] |
| + "" |
| + "@ |
| + sub %0, %2 |
| + sub %0, %1, %2 |
| + sub %0, %2 |
| + sub %0, %1, %2 |
| + sub %0, %2 |
| + rsub %0, %1 |
| + rsub %0, %2, %1" |
| + [(set_attr "length" "2,4,2,4,4,2,4") |
| + (set_attr "cc" "<INTM:alu_cc_attr>")]) |
| + |
| +(define_insn "*sub<mode>3_mul" |
| + [(set (match_operand:INTM 0 "register_operand" "=r") |
| + (minus:INTM (match_operand:INTM 1 "register_operand" "r") |
| + (mult:INTM (match_operand:INTM 2 "register_operand" "r") |
| + (match_operand:SI 3 "immediate_operand" "Ku04" ))))] |
| + "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) || |
| + (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)" |
| + "sub %0, %1, %2 << %p3" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "<INTM:alu_cc_attr>")]) |
| + |
| +(define_insn "*sub<mode>3_lsl" |
| + [(set (match_operand:INTM 0 "register_operand" "=r") |
| + (minus:INTM (match_operand:INTM 1 "register_operand" "r") |
| + (ashift:INTM (match_operand:INTM 2 "register_operand" "r") |
| + (match_operand:SI 3 "avr32_add_shift_immediate_operand" "Ku02"))))] |
| + "" |
| + "sub %0, %1, %2 << %3" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "<INTM:alu_cc_attr>")]) |
| + |
| + |
| +(define_insn "subdi3" |
| + [(set (match_operand:DI 0 "register_operand" "=r,r") |
| + (minus:DI (match_operand:DI 1 "register_operand" "%0,r") |
| + (match_operand:DI 2 "register_operand" "r,r")))] |
| + "" |
| + "@ |
| + sub %0, %2\;sbc %m0, %m0, %m2 |
| + sub %0, %1, %2\;sbc %m0, %m1, %m2" |
| + [(set_attr "length" "6,8") |
| + (set_attr "type" "alu2") |
| + (set_attr "cc" "set_vncz")]) |
| + |
| + |
| +(define_insn "sub<mode>_imm_predicable" |
| + [(set (match_operand:INTM 0 "register_operand" "+r") |
| + (minus:INTM (match_dup 0) |
| + (match_operand:INTM 1 "avr32_cond_immediate_operand" "Ks08")))] |
| + "" |
| + "sub%?\t%0, %1" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "cmp_cond_insn") |
| + (set_attr "predicable" "yes")]) |
| + |
| +(define_insn "rsub<mode>_imm_predicable" |
| + [(set (match_operand:INTM 0 "register_operand" "+r") |
| + (minus:INTM (match_operand:INTM 1 "avr32_cond_immediate_operand" "Ks08") |
| + (match_dup 0)))] |
| + "" |
| + "rsub%?\t%0, %1" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "cmp_cond_insn") |
| + (set_attr "predicable" "yes")]) |
| + |
| +;;============================================================================= |
| +;; multiply |
| +;;----------------------------------------------------------------------------- |
| +;; Multiply op1 and op2 and put the value in op0. |
| +;;============================================================================= |
| + |
| + |
| +(define_insn "mulqi3" |
| + [(set (match_operand:QI 0 "register_operand" "=r,r,r") |
| + (mult:QI (match_operand:QI 1 "register_operand" "%0,r,r") |
| + (match_operand:QI 2 "avr32_mul_operand" "r,r,Ks08")))] |
| + "!TARGET_NO_MUL_INSNS" |
| + { |
| + switch (which_alternative){ |
| + case 0: |
| + return "mul %0, %2"; |
| + case 1: |
| + return "mul %0, %1, %2"; |
| + case 2: |
| + return "mul %0, %1, %2"; |
| + default: |
| + gcc_unreachable(); |
| + } |
| + } |
| + [(set_attr "type" "mulww_w,mulww_w,mulwh") |
| + (set_attr "length" "2,4,4") |
| + (set_attr "cc" "none")]) |
| + |
| +(define_insn "mulsi3" |
| + [(set (match_operand:SI 0 "register_operand" "=r,r,r") |
| + (mult:SI (match_operand:SI 1 "register_operand" "%0,r,r") |
| + (match_operand:SI 2 "avr32_mul_operand" "r,r,Ks08")))] |
| + "!TARGET_NO_MUL_INSNS" |
| + { |
| + switch (which_alternative){ |
| + case 0: |
| + return "mul %0, %2"; |
| + case 1: |
| + return "mul %0, %1, %2"; |
| + case 2: |
| + return "mul %0, %1, %2"; |
| + default: |
| + gcc_unreachable(); |
| + } |
| + } |
| + [(set_attr "type" "mulww_w,mulww_w,mulwh") |
| + (set_attr "length" "2,4,4") |
| + (set_attr "cc" "none")]) |
| + |
| + |
| +(define_insn "mulhisi3" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (mult:SI |
| + (sign_extend:SI (match_operand:HI 1 "register_operand" "%r")) |
| + (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))] |
| + "!TARGET_NO_MUL_INSNS && TARGET_DSP" |
| + "mulhh.w %0, %1:b, %2:b" |
| + [(set_attr "type" "mulhh") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "none")]) |
| + |
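| +;; If a 16x16->32 multiply is only used shifted arithmetically right by 16,
| +;; try to rewrite it to use a widening (mulwh.d style) multiply instead.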
| +(define_peephole2 |
| + [(match_scratch:DI 6 "r") |
| + (set (match_operand:SI 0 "register_operand" "") |
| + (mult:SI |
| + (sign_extend:SI (match_operand:HI 1 "register_operand" "")) |
| + (sign_extend:SI (match_operand:HI 2 "register_operand" "")))) |
| + (set (match_operand:SI 3 "register_operand" "") |
| + (ashiftrt:SI (match_dup 0) |
| + (const_int 16)))] |
| + "!TARGET_NO_MUL_INSNS && TARGET_DSP |
| + && (peep2_reg_dead_p(1, operands[0]) || (REGNO(operands[0]) == REGNO(operands[3])))" |
| + [(set (match_dup 4) (sign_extend:SI (match_dup 1))) |
| + (set (match_dup 6) |
| + (ashift:DI (mult:DI (sign_extend:DI (match_dup 4)) |
| + (sign_extend:DI (match_dup 2))) |
| + (const_int 16))) |
| + (set (match_dup 3) (match_dup 5))] |
| + |
| + "{ |
| + operands[4] = gen_rtx_REG(SImode, REGNO(operands[1])); |
| + operands[5] = gen_highpart (SImode, operands[4]); |
| + }" |
| + ) |
| + |
| +(define_insn "mulnhisi3" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (mult:SI |
| + (sign_extend:SI (neg:HI (match_operand:HI 1 "register_operand" "r"))) |
| + (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))] |
| + "!TARGET_NO_MUL_INSNS && TARGET_DSP" |
| + "mulnhh.w %0, %1:b, %2:b" |
| + [(set_attr "type" "mulhh") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "none")]) |
| + |
| +(define_insn "machisi3" |
| + [(set (match_operand:SI 0 "register_operand" "+r") |
| + (plus:SI (mult:SI |
| + (sign_extend:SI (match_operand:HI 1 "register_operand" "%r")) |
| + (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))) |
| + (match_dup 0)))] |
| + "!TARGET_NO_MUL_INSNS && TARGET_DSP" |
| + "machh.w %0, %1:b, %2:b" |
| + [(set_attr "type" "machh_w") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "none")]) |
| + |
| + |
| + |
| +(define_insn "mulsidi3" |
| + [(set (match_operand:DI 0 "register_operand" "=r") |
| + (mult:DI |
| + (sign_extend:DI (match_operand:SI 1 "register_operand" "%r")) |
| + (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))] |
| + "!TARGET_NO_MUL_INSNS" |
| + "muls.d %0, %1, %2" |
| + [(set_attr "type" "mulww_d") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "none")]) |
| + |
| +(define_insn "umulsidi3" |
| + [(set (match_operand:DI 0 "register_operand" "=r") |
| + (mult:DI |
| + (zero_extend:DI (match_operand:SI 1 "register_operand" "%r")) |
| + (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))] |
| + "!TARGET_NO_MUL_INSNS" |
| + "mulu.d %0, %1, %2" |
| + [(set_attr "type" "mulww_d") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "none")]) |
| + |
| +(define_insn "*mulaccsi3" |
| + [(set (match_operand:SI 0 "register_operand" "+r") |
| + (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "%r") |
| + (match_operand:SI 2 "register_operand" "r")) |
| + (match_dup 0)))] |
| + "!TARGET_NO_MUL_INSNS" |
| + "mac %0, %1, %2" |
| + [(set_attr "type" "macww_w") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "none")]) |
| + |
| +(define_insn "*mulaccsidi3" |
| + [(set (match_operand:DI 0 "register_operand" "+r") |
| + (plus:DI (mult:DI |
| + (sign_extend:DI (match_operand:SI 1 "register_operand" "%r")) |
| + (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))) |
| + (match_dup 0)))] |
| + "!TARGET_NO_MUL_INSNS" |
| + "macs.d %0, %1, %2" |
| + [(set_attr "type" "macww_d") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "none")]) |
| + |
| +(define_insn "*umulaccsidi3" |
| + [(set (match_operand:DI 0 "register_operand" "+r") |
| + (plus:DI (mult:DI |
| + (zero_extend:DI (match_operand:SI 1 "register_operand" "%r")) |
| + (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))) |
| + (match_dup 0)))] |
| + "!TARGET_NO_MUL_INSNS" |
| + "macu.d %0, %1, %2" |
| + [(set_attr "type" "macww_d") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "none")]) |
| + |
| + |
| + |
| +;; Try to avoid Write-After-Write hazards for mul operations
| +;; when possible.
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (mult:SI |
| + (sign_extend:SI (match_operand 1 "general_operand" "")) |
| + (sign_extend:SI (match_operand 2 "general_operand" "")))) |
| + (set (match_dup 0) |
| + (match_operator:SI 3 "alu_operator" [(match_dup 0) |
| + (match_operand 4 "general_operand" "")]))] |
| + "peep2_reg_dead_p(1, operands[2])" |
| + [(set (match_dup 5) |
| + (mult:SI |
| + (sign_extend:SI (match_dup 1)) |
| + (sign_extend:SI (match_dup 2)))) |
| + (set (match_dup 0) |
| + (match_op_dup 3 [(match_dup 5) |
| + (match_dup 4)]))] |
| + "{operands[5] = gen_rtx_REG(SImode, REGNO(operands[2]));}" |
| + ) |
| + |
| + |
| + |
| +;;============================================================================= |
| +;; DSP instructions |
| +;;============================================================================= |
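| +;; Saturating multiply/accumulate and add/subtract patterns (ss_truncate,
| +;; ss_plus, ss_minus). The "rnd" variants add the rounding constant
| +;; 1073741824 (0x40000000) to the product before the final arithmetic shift.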
| +(define_insn "mulsathh_h" |
| + [(set (match_operand:HI 0 "register_operand" "=r") |
| + (ss_truncate:HI (ashiftrt:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r")) |
| + (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))) |
| + (const_int 15))))] |
| + "!TARGET_NO_MUL_INSNS && TARGET_DSP" |
| + "mulsathh.h\t%0, %1:b, %2:b" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "none") |
| + (set_attr "type" "mulhh")]) |
| + |
| +(define_insn "mulsatrndhh_h" |
| + [(set (match_operand:HI 0 "register_operand" "=r") |
| + (ss_truncate:HI (ashiftrt:SI |
| + (plus:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r")) |
| + (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))) |
| + (const_int 1073741824)) |
| + (const_int 15))))] |
| + "!TARGET_NO_MUL_INSNS && TARGET_DSP" |
| + "mulsatrndhh.h\t%0, %1:b, %2:b" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "none") |
| + (set_attr "type" "mulhh")]) |
| + |
| +(define_insn "mulsathh_w" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r")) |
| + (sign_extend:DI (match_operand:HI 2 "register_operand" "r"))) |
| + (const_int 1))))] |
| + "!TARGET_NO_MUL_INSNS && TARGET_DSP" |
| + "mulsathh.w\t%0, %1:b, %2:b" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "none") |
| + (set_attr "type" "mulhh")]) |
| + |
| +(define_insn "mulsatwh_w" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (ss_truncate:SI (ashiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")) |
| + (sign_extend:DI (match_operand:HI 2 "register_operand" "r"))) |
| + (const_int 15))))] |
| + "!TARGET_NO_MUL_INSNS && TARGET_DSP" |
| + "mulsatwh.w\t%0, %1, %2:b" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "none") |
| + (set_attr "type" "mulwh")]) |
| + |
| +(define_insn "mulsatrndwh_w" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (ss_truncate:SI (ashiftrt:DI (plus:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")) |
| + (sign_extend:DI (match_operand:HI 2 "register_operand" "r"))) |
| + (const_int 1073741824)) |
| + (const_int 15))))] |
| + "!TARGET_NO_MUL_INSNS && TARGET_DSP" |
| + "mulsatrndwh.w\t%0, %1, %2:b" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "none") |
| + (set_attr "type" "mulwh")]) |
| + |
| +(define_insn "macsathh_w" |
| + [(set (match_operand:SI 0 "register_operand" "+r") |
| + (plus:SI (match_dup 0) |
| + (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r")) |
| + (sign_extend:DI (match_operand:HI 2 "register_operand" "r"))) |
| + (const_int 1)))))] |
| + "!TARGET_NO_MUL_INSNS && TARGET_DSP" |
| + "macsathh.w\t%0, %1:b, %2:b" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "none") |
| + (set_attr "type" "mulhh")]) |
| + |
| + |
| +(define_insn "mulwh_d" |
| + [(set (match_operand:DI 0 "register_operand" "=r") |
| + (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")) |
| + (sign_extend:DI (match_operand:HI 2 "register_operand" "r"))) |
| + (const_int 16)))] |
| + "!TARGET_NO_MUL_INSNS && TARGET_DSP" |
| + "mulwh.d\t%0, %1, %2:b" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "none") |
| + (set_attr "type" "mulwh")]) |
| + |
| + |
| +(define_insn "mulnwh_d" |
| + [(set (match_operand:DI 0 "register_operand" "=r") |
| + (ashift:DI (mult:DI (not:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))) |
| + (sign_extend:DI (match_operand:HI 2 "register_operand" "r"))) |
| + (const_int 16)))] |
| + "!TARGET_NO_MUL_INSNS && TARGET_DSP" |
| + "mulnwh.d\t%0, %1, %2:b" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "none") |
| + (set_attr "type" "mulwh")]) |
| + |
| +(define_insn "macwh_d" |
| + [(set (match_operand:DI 0 "register_operand" "+r") |
| + (plus:DI (match_dup 0) |
| + (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "%r")) |
| + (sign_extend:DI (match_operand:HI 2 "register_operand" "r"))) |
| + (const_int 16))))] |
| + "!TARGET_NO_MUL_INSNS && TARGET_DSP" |
| + "macwh.d\t%0, %1, %2:b" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "none") |
| + (set_attr "type" "mulwh")]) |
| + |
| +(define_insn "machh_d" |
| + [(set (match_operand:DI 0 "register_operand" "+r") |
| + (plus:DI (match_dup 0) |
| + (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r")) |
| + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))))] |
| + "!TARGET_NO_MUL_INSNS && TARGET_DSP" |
| + "machh.d\t%0, %1:b, %2:b" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "none") |
| + (set_attr "type" "mulwh")]) |
| + |
| +(define_insn "satadd_w" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (ss_plus:SI (match_operand:SI 1 "register_operand" "r") |
| + (match_operand:SI 2 "register_operand" "r")))] |
| + "TARGET_DSP" |
| + "satadd.w\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "none") |
| + (set_attr "type" "alu_sat")]) |
| + |
| +(define_insn "satsub_w" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (ss_minus:SI (match_operand:SI 1 "register_operand" "r") |
| + (match_operand:SI 2 "register_operand" "r")))] |
| + "TARGET_DSP" |
| + "satsub.w\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "none") |
| + (set_attr "type" "alu_sat")]) |
| + |
| +(define_insn "satadd_h" |
| + [(set (match_operand:HI 0 "register_operand" "=r") |
| + (ss_plus:HI (match_operand:HI 1 "register_operand" "r") |
| + (match_operand:HI 2 "register_operand" "r")))] |
| + "TARGET_DSP" |
| + "satadd.h\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "none") |
| + (set_attr "type" "alu_sat")]) |
| + |
| +(define_insn "satsub_h" |
| + [(set (match_operand:HI 0 "register_operand" "=r") |
| + (ss_minus:HI (match_operand:HI 1 "register_operand" "r") |
| + (match_operand:HI 2 "register_operand" "r")))] |
| + "TARGET_DSP" |
| + "satsub.h\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "none") |
| + (set_attr "type" "alu_sat")]) |
| + |
| + |
| +;;============================================================================= |
| +;; smin |
| +;;----------------------------------------------------------------------------- |
| +;; Set reg0 to the smaller of reg1 and reg2, treating the register values
| +;; as signed.
| +;;============================================================================= |
| +(define_insn "sminsi3" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (smin:SI (match_operand:SI 1 "register_operand" "r") |
| + (match_operand:SI 2 "register_operand" "r")))] |
| + "" |
| + "min %0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "none")]) |
| + |
| +;;============================================================================= |
| +;; smax |
| +;;----------------------------------------------------------------------------- |
| +;; Set reg0 to the larger of reg1 and reg2, treating the register values
| +;; as signed.
| +;;============================================================================= |
| +(define_insn "smaxsi3" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (smax:SI (match_operand:SI 1 "register_operand" "r") |
| + (match_operand:SI 2 "register_operand" "r")))] |
| + "" |
| + "max %0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "none")]) |
| + |
| + |
| + |
| +;;============================================================================= |
| +;; Logical operations |
| +;;----------------------------------------------------------------------------- |
| + |
| + |
| +;; Split up simple DImode logical operations. Simply perform the logical |
| +;; operation on the upper and lower halves of the registers. |
| +(define_split |
| + [(set (match_operand:DI 0 "register_operand" "") |
| + (match_operator:DI 6 "logical_binary_operator" |
| + [(match_operand:DI 1 "register_operand" "") |
| + (match_operand:DI 2 "register_operand" "")]))] |
| + "reload_completed" |
| + [(set (match_dup 0) (match_op_dup:SI 6 [(match_dup 1) (match_dup 2)])) |
| + (set (match_dup 3) (match_op_dup:SI 6 [(match_dup 4) (match_dup 5)]))] |
| + " |
| + { |
| + operands[3] = gen_highpart (SImode, operands[0]); |
| + operands[0] = gen_lowpart (SImode, operands[0]); |
| + operands[4] = gen_highpart (SImode, operands[1]); |
| + operands[1] = gen_lowpart (SImode, operands[1]); |
| + operands[5] = gen_highpart (SImode, operands[2]); |
| + operands[2] = gen_lowpart (SImode, operands[2]); |
| + }" |
| +) |
| + |
| +;;============================================================================= |
| +;; Logical operations with shifted operand |
| +;;============================================================================= |
| +(define_insn "<code>si_lshift" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (logical:SI (match_operator:SI 4 "logical_shift_operator" |
| + [(match_operand:SI 2 "register_operand" "r") |
| + (match_operand:SI 3 "immediate_operand" "Ku05")]) |
| + (match_operand:SI 1 "register_operand" "r")))] |
| + "" |
| + { |
| + if ( GET_CODE(operands[4]) == ASHIFT ) |
| + return "<logical_insn>\t%0, %1, %2 << %3"; |
| + else |
| + return "<logical_insn>\t%0, %1, %2 >> %3"; |
| + } |
| + |
| + [(set_attr "cc" "set_z")] |
| +) |
| + |
| + |
| +;;************************************************ |
| +;; Peepholes for detecting logical operations
| +;; with shifted operands |
| +;;************************************************ |
| + |
| +(define_peephole |
| + [(set (match_operand:SI 3 "register_operand" "") |
| + (match_operator:SI 5 "logical_shift_operator" |
| + [(match_operand:SI 1 "register_operand" "") |
| + (match_operand:SI 2 "immediate_operand" "")])) |
| + (set (match_operand:SI 0 "register_operand" "") |
| + (logical:SI (match_operand:SI 4 "register_operand" "") |
| + (match_dup 3)))] |
| + "(dead_or_set_p(insn, operands[3])) || (REGNO(operands[3]) == REGNO(operands[0]))" |
| + { |
| + if ( GET_CODE(operands[5]) == ASHIFT ) |
| + return "<logical_insn>\t%0, %4, %1 << %2"; |
| + else |
| + return "<logical_insn>\t%0, %4, %1 >> %2"; |
| + } |
| + [(set_attr "cc" "set_z")] |
| + ) |
| + |
| +(define_peephole |
| + [(set (match_operand:SI 3 "register_operand" "") |
| + (match_operator:SI 5 "logical_shift_operator" |
| + [(match_operand:SI 1 "register_operand" "") |
| + (match_operand:SI 2 "immediate_operand" "")])) |
| + (set (match_operand:SI 0 "register_operand" "") |
| + (logical:SI (match_dup 3) |
| + (match_operand:SI 4 "register_operand" "")))] |
| + "(dead_or_set_p(insn, operands[3])) || (REGNO(operands[3]) == REGNO(operands[0]))" |
| + { |
| + if ( GET_CODE(operands[5]) == ASHIFT ) |
| + return "<logical_insn>\t%0, %4, %1 << %2"; |
| + else |
| + return "<logical_insn>\t%0, %4, %1 >> %2"; |
| + } |
| + [(set_attr "cc" "set_z")] |
| + ) |
| + |
| + |
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (match_operator:SI 5 "logical_shift_operator" |
| + [(match_operand:SI 1 "register_operand" "") |
| + (match_operand:SI 2 "immediate_operand" "")])) |
| + (set (match_operand:SI 3 "register_operand" "") |
| + (logical:SI (match_operand:SI 4 "register_operand" "") |
| + (match_dup 0)))] |
| + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[3]) == REGNO(operands[0]))" |
| + |
| + [(set (match_dup 3) |
| + (logical:SI (match_op_dup:SI 5 [(match_dup 1) (match_dup 2)]) |
| + (match_dup 4)))] |
| + |
| + "" |
| +) |
| + |
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (match_operator:SI 5 "logical_shift_operator" |
| + [(match_operand:SI 1 "register_operand" "") |
| + (match_operand:SI 2 "immediate_operand" "")])) |
| + (set (match_operand:SI 3 "register_operand" "") |
| + (logical:SI (match_dup 0) |
| + (match_operand:SI 4 "register_operand" "")))] |
| + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[3]) == REGNO(operands[0]))" |
| + |
| + [(set (match_dup 3) |
| + (logical:SI (match_op_dup:SI 5 [(match_dup 1) (match_dup 2)]) |
| + (match_dup 4)))] |
| + |
| + "" |
| +) |
| + |
| + |
| +;;============================================================================= |
| +;; and |
| +;;----------------------------------------------------------------------------- |
| +;; Store the result of a bitwise logical-and between reg0 and reg2 in reg0.
| +;;============================================================================= |
| + |
| +(define_insn "andnsi" |
| + [(set (match_operand:SI 0 "register_operand" "+r") |
| + (and:SI (match_dup 0) |
| + (not:SI (match_operand:SI 1 "register_operand" "r"))))] |
| + "" |
| + "andn %0, %1" |
| + [(set_attr "cc" "set_z") |
| + (set_attr "length" "2")] |
| +) |
| + |
| + |
| +(define_insn "andsi3" |
| + [(set (match_operand:SI 0 "avr32_rmw_memory_or_register_operand" "=Y,r,r,r, r, r,r,r,r,r") |
| + (and:SI (match_operand:SI 1 "avr32_rmw_memory_or_register_operand" "%0,r,0,0, 0, 0,0,0,0,r" ) |
| + (match_operand:SI 2 "nonmemory_operand" " N,M,N,Ku16,Ks17,J,L,r,i,r")))] |
| + "" |
| + "@ |
| + memc\t%0, %z2 |
| + bfextu\t%0, %1, 0, %z2 |
| + cbr\t%0, %z2 |
| + andl\t%0, %2, COH |
| + andl\t%0, lo(%2) |
| + andh\t%0, hi(%2), COH |
| + andh\t%0, hi(%2) |
| + and\t%0, %2 |
| + andh\t%0, hi(%2)\;andl\t%0, lo(%2) |
| + and\t%0, %1, %2" |
| + |
| + [(set_attr "length" "4,4,2,4,4,4,4,2,8,4") |
| + (set_attr "cc" "none,set_z,set_z,set_z,set_z,set_z,set_z,set_z,set_z,set_z")]) |
| + |
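| +;; The DImode logical insns (anddi3, iordi3, xordi3) just emit "#" and are
| +;; split into two SImode operations by the DImode logical split above.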
| +(define_insn "anddi3" |
| + [(set (match_operand:DI 0 "register_operand" "=&r,&r") |
| + (and:DI (match_operand:DI 1 "register_operand" "%0,r") |
| + (match_operand:DI 2 "register_operand" "r,r")))] |
| + "" |
| + "#" |
| + [(set_attr "length" "8") |
| + (set_attr "cc" "clobber")] |
| +) |
| + |
| +;;============================================================================= |
| +;; or |
| +;;----------------------------------------------------------------------------- |
| +;; Store the result of a bitwise inclusive-or between reg0 and reg2 in reg0.
| +;;============================================================================= |
| + |
| +(define_insn "iorsi3" |
| + [(set (match_operand:SI 0 "avr32_rmw_memory_or_register_operand" "=Y,r,r, r,r,r,r") |
| + (ior:SI (match_operand:SI 1 "avr32_rmw_memory_or_register_operand" "%0,0,0, 0,0,0,r" ) |
| + (match_operand:SI 2 "nonmemory_operand" " O,O,Ku16,J,r,i,r")))] |
| + "" |
| + "@ |
| + mems\t%0, %p2 |
| + sbr\t%0, %p2 |
| + orl\t%0, %2 |
| + orh\t%0, hi(%2) |
| + or\t%0, %2 |
| + orh\t%0, hi(%2)\;orl\t%0, lo(%2) |
| + or\t%0, %1, %2" |
| + |
| + [(set_attr "length" "4,2,4,4,2,8,4") |
| + (set_attr "cc" "none,set_z,set_z,set_z,set_z,set_z,set_z")]) |
| + |
| + |
| +(define_insn "iordi3" |
| + [(set (match_operand:DI 0 "register_operand" "=&r,&r") |
| + (ior:DI (match_operand:DI 1 "register_operand" "%0,r") |
| + (match_operand:DI 2 "register_operand" "r,r")))] |
| + "" |
| + "#" |
| + [(set_attr "length" "8") |
| + (set_attr "cc" "clobber")] |
| +) |
| + |
| +;;============================================================================= |
| +;; xor
| +;;----------------------------------------------------------------------------- |
| +;; Store the result of a bitwise exclusive-or between reg0 and reg2 in reg0.
| +;;============================================================================= |
| + |
| +(define_insn "xorsi3" |
| + [(set (match_operand:SI 0 "avr32_rmw_memory_or_register_operand" "=Y,r, r,r,r,r") |
| + (xor:SI (match_operand:SI 1 "avr32_rmw_memory_or_register_operand" "%0,0, 0,0,0,r" ) |
| + (match_operand:SI 2 "nonmemory_operand" " O,Ku16,J,r,i,r")))] |
| + "" |
| + "@ |
| + memt\t%0, %p2 |
| + eorl\t%0, %2 |
| + eorh\t%0, hi(%2) |
| + eor\t%0, %2 |
| + eorh\t%0, hi(%2)\;eorl\t%0, lo(%2) |
| + eor\t%0, %1, %2" |
| + |
| + [(set_attr "length" "4,4,4,2,8,4") |
| + (set_attr "cc" "none,set_z,set_z,set_z,set_z,set_z")]) |
| + |
| + |
| +(define_insn "xordi3" |
| + [(set (match_operand:DI 0 "register_operand" "=&r,&r") |
| + (xor:DI (match_operand:DI 1 "register_operand" "%0,r") |
| + (match_operand:DI 2 "register_operand" "r,r")))] |
| + "" |
| + "#" |
| + [(set_attr "length" "8") |
| + (set_attr "cc" "clobber")] |
| +) |
| + |
| +;;============================================================================= |
| +;; Three operand predicable insns |
| +;;============================================================================= |
| + |
| +(define_insn "<predicable_insn3><mode>_predicable" |
| + [(set (match_operand:INTM 0 "register_operand" "=r") |
| + (predicable_op3:INTM (match_operand:INTM 1 "register_operand" "<predicable_commutative3>r") |
| + (match_operand:INTM 2 "register_operand" "r")))] |
| + "TARGET_V2_INSNS" |
| + "<predicable_insn3>%?\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "cmp_cond_insn") |
| + (set_attr "predicable" "yes")] |
| +) |
| + |
| +(define_insn_and_split "<predicable_insn3><mode>_imm_clobber_predicable" |
| + [(parallel |
| + [(set (match_operand:INTM 0 "register_operand" "=r") |
| + (predicable_op3:INTM (match_operand:INTM 1 "register_operand" "<predicable_commutative3>r") |
| + (match_operand:INTM 2 "avr32_mov_immediate_operand" "JKs21"))) |
| + (clobber (match_operand:INTM 3 "register_operand" "=&r"))])] |
| + "TARGET_V2_INSNS" |
| + { |
| + if ( current_insn_predicate != NULL_RTX ) |
| + { |
| + if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08") ) |
| + return "%! mov%?\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3"; |
| + else if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks21") ) |
| + return "%! mov\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3"; |
| + else |
| + return "%! movh\t%3, hi(%2)\;<predicable_insn3>%?\t%0, %1, %3"; |
| + } |
| + else |
| + { |
| + if ( !avr32_cond_imm_clobber_splittable (insn, operands) ) |
| + { |
| + if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08") ) |
| + return "mov%?\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3"; |
| + else if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks21") ) |
| + return "mov\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3"; |
| + else |
| + return "movh\t%3, hi(%2)\;<predicable_insn3>%?\t%0, %1, %3"; |
| + } |
| + return "#"; |
| + } |
| + |
| + } |
| + ;; If we find out that we could not actually do if-conversion on the block |
| + ;; containing this insn, we convert it back to normal immediate format
| + ;; to avoid outputting a redundant move insn.
| + ;; Do not split until after we have checked if we can make the insn |
| + ;; conditional. |
| + "(GET_CODE (PATTERN (insn)) != COND_EXEC |
| + && cfun->machine->ifcvt_after_reload |
| + && avr32_cond_imm_clobber_splittable (insn, operands))" |
| + [(set (match_dup 0) |
| + (predicable_op3:INTM (match_dup 1) |
| + (match_dup 2)))] |
| + "" |
| + [(set_attr "length" "8") |
| + (set_attr "cc" "cmp_cond_insn") |
| + (set_attr "predicable" "yes")] |
| + ) |
| + |
| + |
| +;;============================================================================= |
| +;; Zero extend predicable insns |
| +;;============================================================================= |
| +(define_insn_and_split "zero_extendhisi_clobber_predicable" |
| + [(parallel |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (zero_extend:SI (match_operand:HI 1 "register_operand" "r"))) |
| + (clobber (match_operand:SI 2 "register_operand" "=&r"))])] |
| + "TARGET_V2_INSNS" |
| + { |
| + if ( current_insn_predicate != NULL_RTX ) |
| + { |
| + return "%! mov\t%2, 0xffff\;and%?\t%0, %1, %2"; |
| + } |
| + else |
| + { |
| + return "#"; |
| + } |
| + |
| + } |
| +  ;; If we find out that we could not actually do if-conversion on the block |
| +  ;; containing this insn, we convert it back to the normal immediate format |
| +  ;; to avoid outputting a redundant move insn. |
| +  ;; Do not split until after we have checked whether we can make the insn |
| +  ;; conditional. |
| + "(GET_CODE (PATTERN (insn)) != COND_EXEC |
| + && cfun->machine->ifcvt_after_reload)" |
| + [(set (match_dup 0) |
| + (zero_extend:SI (match_dup 1)))] |
| + "" |
| + [(set_attr "length" "8") |
| + (set_attr "cc" "cmp_cond_insn") |
| + (set_attr "predicable" "yes")] |
| + ) |
| + |
| +(define_insn_and_split "zero_extendqisi_clobber_predicable" |
| + [(parallel |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (zero_extend:SI (match_operand:QI 1 "register_operand" "r"))) |
| + (clobber (match_operand:SI 2 "register_operand" "=&r"))])] |
| + "TARGET_V2_INSNS" |
| + { |
| + if ( current_insn_predicate != NULL_RTX ) |
| + { |
| + return "%! mov\t%2, 0xff\;and%?\t%0, %1, %2"; |
| + } |
| + else |
| + { |
| + return "#"; |
| + } |
| + |
| + } |
| +  ;; If we find out that we could not actually do if-conversion on the block |
| +  ;; containing this insn, we convert it back to the normal immediate format |
| +  ;; to avoid outputting a redundant move insn. |
| +  ;; Do not split until after we have checked whether we can make the insn |
| +  ;; conditional. |
| + "(GET_CODE (PATTERN (insn)) != COND_EXEC |
| + && cfun->machine->ifcvt_after_reload)" |
| + [(set (match_dup 0) |
| + (zero_extend:SI (match_dup 1)))] |
| + "" |
| + [(set_attr "length" "8") |
| + (set_attr "cc" "cmp_cond_insn") |
| + (set_attr "predicable" "yes")] |
| + ) |
| + |
| +(define_insn_and_split "zero_extendqihi_clobber_predicable" |
| + [(parallel |
| + [(set (match_operand:HI 0 "register_operand" "=r") |
| + (zero_extend:HI (match_operand:QI 1 "register_operand" "r"))) |
| + (clobber (match_operand:SI 2 "register_operand" "=&r"))])] |
| + "TARGET_V2_INSNS" |
| + { |
| + if ( current_insn_predicate != NULL_RTX ) |
| + { |
| + return "%! mov\t%2, 0xff\;and%?\t%0, %1, %2"; |
| + } |
| + else |
| + { |
| + return "#"; |
| + } |
| + |
| + } |
| +  ;; If we find out that we could not actually do if-conversion on the block |
| +  ;; containing this insn, we convert it back to the normal immediate format |
| +  ;; to avoid outputting a redundant move insn. |
| +  ;; Do not split until after we have checked whether we can make the insn |
| +  ;; conditional. |
| + "(GET_CODE (PATTERN (insn)) != COND_EXEC |
| + && cfun->machine->ifcvt_after_reload)" |
| + [(set (match_dup 0) |
| + (zero_extend:HI (match_dup 1)))] |
| + "" |
| + [(set_attr "length" "8") |
| + (set_attr "cc" "cmp_cond_insn") |
| + (set_attr "predicable" "yes")] |
| + ) |
| +;;============================================================================= |
| +;; divmod |
| +;;----------------------------------------------------------------------------- |
| +;; Signed division that produces both a quotient and a remainder. |
| +;;============================================================================= |
| +(define_expand "divmodsi4" |
| + [(parallel [ |
| + (parallel [ |
| + (set (match_operand:SI 0 "register_operand" "=r") |
| + (div:SI (match_operand:SI 1 "register_operand" "r") |
| + (match_operand:SI 2 "register_operand" "r"))) |
| + (set (match_operand:SI 3 "register_operand" "=r") |
| + (mod:SI (match_dup 1) |
| + (match_dup 2)))]) |
| + (use (match_dup 4))])] |
| + "" |
| + { |
| + if (! no_new_pseudos) { |
| + operands[4] = gen_reg_rtx (DImode); |
| + |
| + emit_insn(gen_divmodsi4_internal(operands[4],operands[1],operands[2])); |
| + emit_move_insn(operands[0], gen_rtx_SUBREG( SImode, operands[4], 4)); |
| + emit_move_insn(operands[3], gen_rtx_SUBREG( SImode, operands[4], 0)); |
| + |
| + DONE; |
| + } else { |
| + FAIL; |
| + } |
| + |
| + }) |
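| + |
| +;; Illustrative note (not from the original sources): this expander is used |
| +;; when both the quotient and the remainder of the same operands are |
| +;; wanted, e.g. |
| +;; |
| +;;   void qr (int a, int b, int *q, int *r) { *q = a / b; *r = a % b; } |
| +;; |
| +;; A single "divs" then produces both results in the DImode register pair, |
| +;; and the two SUBREG moves above pick the quotient and the remainder out |
| +;; of their respective halves of that pair. |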
| + |
| + |
| +(define_insn "divmodsi4_internal" |
| + [(set (match_operand:DI 0 "register_operand" "=r") |
| + (unspec:DI [(match_operand:SI 1 "register_operand" "r") |
| + (match_operand:SI 2 "register_operand" "r")] |
| + UNSPEC_DIVMODSI4_INTERNAL))] |
| + "" |
| + "divs %0, %1, %2" |
| + [(set_attr "type" "div") |
| + (set_attr "cc" "none")]) |
| + |
| + |
| +;;============================================================================= |
| +;; udivmod |
| +;;----------------------------------------------------------------------------- |
| +;; Unsigned division that produces both a quotient and a remainder. |
| +;;============================================================================= |
| +(define_expand "udivmodsi4" |
| + [(parallel [ |
| + (parallel [ |
| + (set (match_operand:SI 0 "register_operand" "=r") |
| + (udiv:SI (match_operand:SI 1 "register_operand" "r") |
| + (match_operand:SI 2 "register_operand" "r"))) |
| + (set (match_operand:SI 3 "register_operand" "=r") |
| + (umod:SI (match_dup 1) |
| + (match_dup 2)))]) |
| + (use (match_dup 4))])] |
| + "" |
| + { |
| + if (! no_new_pseudos) { |
| + operands[4] = gen_reg_rtx (DImode); |
| + |
| + emit_insn(gen_udivmodsi4_internal(operands[4],operands[1],operands[2])); |
| + emit_move_insn(operands[0], gen_rtx_SUBREG( SImode, operands[4], 4)); |
| + emit_move_insn(operands[3], gen_rtx_SUBREG( SImode, operands[4], 0)); |
| + |
| + DONE; |
| + } else { |
| + FAIL; |
| + } |
| + }) |
| + |
| +(define_insn "udivmodsi4_internal" |
| + [(set (match_operand:DI 0 "register_operand" "=r") |
| + (unspec:DI [(match_operand:SI 1 "register_operand" "r") |
| + (match_operand:SI 2 "register_operand" "r")] |
| + UNSPEC_UDIVMODSI4_INTERNAL))] |
| + "" |
| + "divu %0, %1, %2" |
| + [(set_attr "type" "div") |
| + (set_attr "cc" "none")]) |
| + |
| + |
| +;;============================================================================= |
| +;; Arithmetic-shift left |
| +;;----------------------------------------------------------------------------- |
| +;; Arithmetic-shift reg0 left by reg2 or immediate value. |
| +;;============================================================================= |
| + |
| +(define_insn "ashlsi3" |
| + [(set (match_operand:SI 0 "register_operand" "=r,r,r") |
| + (ashift:SI (match_operand:SI 1 "register_operand" "r,0,r") |
| + (match_operand:SI 2 "nonmemory_operand" "r,Ku05,Ku05")))] |
| + "" |
| + "@ |
| + lsl %0, %1, %2 |
| + lsl %0, %2 |
| + lsl %0, %1, %2" |
| + [(set_attr "length" "4,2,4") |
| + (set_attr "cc" "set_ncz")]) |
| + |
| +;;============================================================================= |
| +;; Arithmetic-shift right |
| +;;----------------------------------------------------------------------------- |
| +;; Arithmetic-shift reg0 right by a register or an immediate value. |
| +;;============================================================================= |
| + |
| +(define_insn "ashrsi3" |
| + [(set (match_operand:SI 0 "register_operand" "=r,r,r") |
| + (ashiftrt:SI (match_operand:SI 1 "register_operand" "r,0,r") |
| + (match_operand:SI 2 "nonmemory_operand" "r,Ku05,Ku05")))] |
| + "" |
| + "@ |
| + asr %0, %1, %2 |
| + asr %0, %2 |
| + asr %0, %1, %2" |
| + [(set_attr "length" "4,2,4") |
| + (set_attr "cc" "set_ncz")]) |
| + |
| +;;============================================================================= |
| +;; Logical shift right |
| +;;----------------------------------------------------------------------------- |
| +;; Logical shift reg0 right by a register or an immediate value. |
| +;;============================================================================= |
| + |
| +(define_insn "lshrsi3" |
| + [(set (match_operand:SI 0 "register_operand" "=r,r,r") |
| + (lshiftrt:SI (match_operand:SI 1 "register_operand" "r,0,r") |
| + (match_operand:SI 2 "nonmemory_operand" "r,Ku05,Ku05")))] |
| + "" |
| + "@ |
| + lsr %0, %1, %2 |
| + lsr %0, %2 |
| + lsr %0, %1, %2" |
| + [(set_attr "length" "4,2,4") |
| + (set_attr "cc" "set_ncz")]) |
| + |
| + |
| +;;============================================================================= |
| +;; neg |
| +;;----------------------------------------------------------------------------- |
| +;; Negate operand 1 and store the result in operand 0. |
| +;;============================================================================= |
| +(define_insn "negsi2" |
| + [(set (match_operand:SI 0 "register_operand" "=r,r") |
| + (neg:SI (match_operand:SI 1 "register_operand" "0,r")))] |
| + "" |
| + "@ |
| + neg\t%0 |
| + rsub\t%0, %1, 0" |
| + [(set_attr "length" "2,4") |
| + (set_attr "cc" "set_vncz")]) |
| + |
| +(define_insn "negsi2_predicable" |
| + [(set (match_operand:SI 0 "register_operand" "+r") |
| + (neg:SI (match_dup 0)))] |
| + "TARGET_V2_INSNS" |
| + "rsub%?\t%0, 0" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "cmp_cond_insn") |
| + (set_attr "predicable" "yes")]) |
| + |
| +;;============================================================================= |
| +;; abs |
| +;;----------------------------------------------------------------------------- |
| +;; Store the absolute value of operand 1 into operand 0. |
| +;;============================================================================= |
| +(define_insn "abssi2" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (abs:SI (match_operand:SI 1 "register_operand" "0")))] |
| + "" |
| + "abs\t%0" |
| + [(set_attr "length" "2") |
| + (set_attr "cc" "set_z")]) |
| + |
| + |
| +;;============================================================================= |
| +;; one_cmpl |
| +;;----------------------------------------------------------------------------- |
| +;; Store the bitwise-complement of operand 1 into operand 0. |
| +;;============================================================================= |
| + |
| +(define_insn "one_cmplsi2" |
| + [(set (match_operand:SI 0 "register_operand" "=r,r") |
| + (not:SI (match_operand:SI 1 "register_operand" "0,r")))] |
| + "" |
| + "@ |
| + com\t%0 |
| + rsub\t%0, %1, -1" |
| + [(set_attr "length" "2,4") |
| + (set_attr "cc" "set_z")]) |
| + |
| + |
| +(define_insn "one_cmplsi2_predicable" |
| + [(set (match_operand:SI 0 "register_operand" "+r") |
| + (not:SI (match_dup 0)))] |
| + "TARGET_V2_INSNS" |
| + "rsub%?\t%0, -1" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "cmp_cond_insn") |
| + (set_attr "predicable" "yes")]) |
| + |
| + |
| +;;============================================================================= |
| +;; Bit load |
| +;;----------------------------------------------------------------------------- |
| +;; Load a bit into Z and C flags |
| +;;============================================================================= |
| +(define_insn "bldsi" |
| + [(set (cc0) |
| + (and:SI (match_operand:SI 0 "register_operand" "r") |
| + (match_operand:SI 1 "one_bit_set_operand" "i")))] |
| + "" |
| + "bld\t%0, %p1" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "bld")] |
| + ) |
| + |
| + |
| +;;============================================================================= |
| +;; Compare |
| +;;----------------------------------------------------------------------------- |
| +;; Compare reg0 with reg1 or an immediate value. |
| +;;============================================================================= |
| + |
| +(define_expand "cmp<mode>" |
| + [(set (cc0) |
| + (compare:CMP |
| + (match_operand:CMP 0 "register_operand" "") |
| + (match_operand:CMP 1 "<CMP:cmp_predicate>" "")))] |
| + "" |
| + "{ |
| + avr32_compare_op0 = operands[0]; |
| + avr32_compare_op1 = operands[1]; |
| + }" |
| +) |
| + |
| +(define_insn "cmp<mode>_internal" |
| + [(set (cc0) |
| + (compare:CMP |
| + (match_operand:CMP 0 "register_operand" "r") |
| + (match_operand:CMP 1 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")))] |
| + "" |
| + { |
| + set_next_insn_cond(insn, |
| + avr32_output_cmp(get_next_insn_cond(insn), GET_MODE (operands[0]), operands[0], operands[1])); |
| + return ""; |
| + } |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "compare")]) |
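| + |
| +;; Illustrative note (not from the original sources): the compare has no |
| +;; fixed template; avr32_output_cmp inspects the condition of the following |
| +;; flag-consuming insn (via get_next_insn_cond) and emits a compare that |
| +;; matches that condition and the operand mode.  The cmp<mode> expander |
| +;; above also records the operands in avr32_compare_op0/op1 so that the |
| +;; conditional move/add expanders further down can delete the compare and |
| +;; regenerate it inside their own output. |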
| + |
| + |
| +;;============================================================================= |
| +;; Test if zero |
| +;;----------------------------------------------------------------------------- |
| +;; Compare reg against zero and set the condition codes. |
| +;;============================================================================= |
| + |
| + |
| +(define_expand "tstsi" |
| + [(set (cc0) |
| + (match_operand:SI 0 "register_operand" ""))] |
| + "" |
| + { |
| + avr32_compare_op0 = operands[0]; |
| + avr32_compare_op1 = const0_rtx; |
| + } |
| +) |
| + |
| +(define_insn "tstsi_internal" |
| + [(set (cc0) |
| + (match_operand:SI 0 "register_operand" "r"))] |
| + "" |
| + { |
| + set_next_insn_cond(insn, |
| + avr32_output_cmp(get_next_insn_cond(insn), SImode, operands[0], const0_rtx)); |
| + |
| + return ""; |
| + } |
| + [(set_attr "length" "2") |
| + (set_attr "cc" "compare")]) |
| + |
| + |
| +(define_expand "tstdi" |
| + [(set (cc0) |
| + (match_operand:DI 0 "register_operand" ""))] |
| + "" |
| + { |
| + avr32_compare_op0 = operands[0]; |
| + avr32_compare_op1 = const0_rtx; |
| + } |
| +) |
| + |
| +(define_insn "tstdi_internal" |
| + [(set (cc0) |
| + (match_operand:DI 0 "register_operand" "r"))] |
| + "" |
| + { |
| + set_next_insn_cond(insn, |
| + avr32_output_cmp(get_next_insn_cond(insn), DImode, operands[0], const0_rtx)); |
| + return ""; |
| + } |
| + [(set_attr "length" "4") |
| + (set_attr "type" "alu2") |
| + (set_attr "cc" "compare")]) |
| + |
| + |
| + |
| +;;============================================================================= |
| +;; Convert operands |
| +;;----------------------------------------------------------------------------- |
| +;; |
| +;;============================================================================= |
| +(define_insn "truncdisi2" |
| + [(set (match_operand:SI 0 "general_operand" "") |
| + (truncate:SI (match_operand:DI 1 "general_operand" "")))] |
| + "" |
| + "truncdisi2") |
| + |
| +;;============================================================================= |
| +;; Extend |
| +;;----------------------------------------------------------------------------- |
| +;; |
| +;;============================================================================= |
| + |
| + |
| +(define_insn "extendhisi2" |
| + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r") |
| + (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))] |
| + "" |
| + { |
| + switch ( which_alternative ){ |
| + case 0: |
| + return "casts.h\t%0"; |
| + case 1: |
| + return "bfexts\t%0, %1, 0, 16"; |
| + case 2: |
| + case 3: |
| + return "ld.sh\t%0, %1"; |
| + default: |
| + abort(); |
| + } |
| + } |
| + [(set_attr "length" "2,4,2,4") |
| + (set_attr "cc" "set_ncz,set_ncz,none,none") |
| + (set_attr "type" "alu,alu,load_rm,load_rm")]) |
| + |
| +(define_insn "extendqisi2" |
| + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r") |
| + (sign_extend:SI (match_operand:QI 1 "extendqi_operand" "0,r,RKu00,m")))] |
| + "" |
| + { |
| + switch ( which_alternative ){ |
| + case 0: |
| + return "casts.b\t%0"; |
| + case 1: |
| + return "bfexts\t%0, %1, 0, 8"; |
| + case 2: |
| + case 3: |
| + return "ld.sb\t%0, %1"; |
| + default: |
| + abort(); |
| + } |
| + } |
| + [(set_attr "length" "2,4,2,4") |
| + (set_attr "cc" "set_ncz,set_ncz,none,none") |
| + (set_attr "type" "alu,alu,load_rm,load_rm")]) |
| + |
| +(define_insn "extendqihi2" |
| + [(set (match_operand:HI 0 "register_operand" "=r,r,r,r") |
| + (sign_extend:HI (match_operand:QI 1 "extendqi_operand" "0,r,RKu00,m")))] |
| + "" |
| + { |
| + switch ( which_alternative ){ |
| + case 0: |
| + return "casts.b\t%0"; |
| + case 1: |
| + return "bfexts\t%0, %1, 0, 8"; |
| + case 2: |
| + case 3: |
| + return "ld.sb\t%0, %1"; |
| + default: |
| + abort(); |
| + } |
| + } |
| + [(set_attr "length" "2,4,2,4") |
| + (set_attr "cc" "set_ncz,set_ncz,none,none") |
| + (set_attr "type" "alu,alu,load_rm,load_rm")]) |
| + |
| + |
| +;;============================================================================= |
| +;; Zero-extend |
| +;;----------------------------------------------------------------------------- |
| +;; |
| +;;============================================================================= |
| + |
| +(define_insn "zero_extendhisi2" |
| + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r") |
| + (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))] |
| + "" |
| + { |
| + switch ( which_alternative ){ |
| + case 0: |
| + return "castu.h\t%0"; |
| + case 1: |
| + return "bfextu\t%0, %1, 0, 16"; |
| + case 2: |
| + case 3: |
| + return "ld.uh\t%0, %1"; |
| + default: |
| + abort(); |
| + } |
| + } |
| + |
| + [(set_attr "length" "2,4,2,4") |
| + (set_attr "cc" "set_ncz,set_ncz,none,none") |
| + (set_attr "type" "alu,alu,load_rm,load_rm")]) |
| + |
| +(define_insn "zero_extendqisi2" |
| + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r") |
| + (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))] |
| + "" |
| + { |
| + switch ( which_alternative ){ |
| + case 0: |
| + return "castu.b\t%0"; |
| + case 1: |
| + return "bfextu\t%0, %1, 0, 8"; |
| + case 2: |
| + case 3: |
| + return "ld.ub\t%0, %1"; |
| + default: |
| + abort(); |
| + } |
| + } |
| + [(set_attr "length" "2,4,2,4") |
| + (set_attr "cc" "set_ncz, set_ncz, none, none") |
| + (set_attr "type" "alu, alu, load_rm, load_rm")]) |
| + |
| +(define_insn "zero_extendqihi2" |
| + [(set (match_operand:HI 0 "register_operand" "=r,r,r,r") |
| + (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))] |
| + "" |
| + { |
| + switch ( which_alternative ){ |
| + case 0: |
| + return "castu.b\t%0"; |
| + case 1: |
| + return "bfextu\t%0, %1, 0, 8"; |
| + case 2: |
| + case 3: |
| + return "ld.ub\t%0, %1"; |
| + default: |
| + abort(); |
| + } |
| + } |
| + [(set_attr "length" "2,4,2,4") |
| + (set_attr "cc" "set_ncz, set_ncz, none, none") |
| + (set_attr "type" "alu, alu, load_rm, load_rm")]) |
| + |
| + |
| +;;============================================================================= |
| +;; Conditional load and extend insns |
| +;;============================================================================= |
| +(define_insn "ldsi<mode>_predicable_se" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (sign_extend:SI |
| + (match_operand:INTM 1 "memory_operand" "<INTM:pred_mem_constraint>")))] |
| + "TARGET_V2_INSNS" |
| + "ld<INTM:load_postfix_s>%?\t%0, %1" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "cmp_cond_insn") |
| + (set_attr "type" "load") |
| + (set_attr "predicable" "yes")] |
| +) |
| + |
| +(define_insn "ldsi<mode>_predicable_ze" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (zero_extend:SI |
| + (match_operand:INTM 1 "memory_operand" "<INTM:pred_mem_constraint>")))] |
| + "TARGET_V2_INSNS" |
| + "ld<INTM:load_postfix_u>%?\t%0, %1" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "cmp_cond_insn") |
| + (set_attr "type" "load") |
| + (set_attr "predicable" "yes")] |
| +) |
| + |
| +(define_insn "ldhi_predicable_ze" |
| + [(set (match_operand:HI 0 "register_operand" "=r") |
| + (zero_extend:HI |
| + (match_operand:QI 1 "memory_operand" "RKs10")))] |
| + "TARGET_V2_INSNS" |
| + "ld.ub%?\t%0, %1" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "cmp_cond_insn") |
| + (set_attr "type" "load") |
| + (set_attr "predicable" "yes")] |
| +) |
| + |
| +(define_insn "ldhi_predicable_se" |
| + [(set (match_operand:HI 0 "register_operand" "=r") |
| + (sign_extend:HI |
| + (match_operand:QI 1 "memory_operand" "RKs10")))] |
| + "TARGET_V2_INSNS" |
| + "ld.sb%?\t%0, %1" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "cmp_cond_insn") |
| + (set_attr "type" "load") |
| + (set_attr "predicable" "yes")] |
| +) |
| + |
| +;;============================================================================= |
| +;; Conditional set register |
| +;; sr{cond4} rd |
| +;;----------------------------------------------------------------------------- |
| + |
| +;; Because of the same issue as with conditional moves and adds, we must |
| +;; not separate the compare instruction from the scc instruction, as |
| +;; they might be scheduled "badly". |
| + |
| +(define_insn "s<code>" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (any_cond:SI (cc0) |
| + (const_int 0)))] |
| + "" |
| + "sr<cond>\t%0" |
| + [(set_attr "length" "2") |
| + (set_attr "cc" "none")]) |
| + |
| +(define_insn "smi" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (unspec:SI [(cc0) |
| + (const_int 0)] UNSPEC_COND_MI))] |
| + "" |
| + "srmi\t%0" |
| + [(set_attr "length" "2") |
| + (set_attr "cc" "none")]) |
| + |
| +(define_insn "spl" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (unspec:SI [(cc0) |
| + (const_int 0)] UNSPEC_COND_PL))] |
| + "" |
| + "srpl\t%0" |
| + [(set_attr "length" "2") |
| + (set_attr "cc" "none")]) |
| + |
| + |
| +;;============================================================================= |
| +;; Conditional branch |
| +;;----------------------------------------------------------------------------- |
| +;; Branch to label if the specified condition codes are set. |
| +;;============================================================================= |
| +; branch if negative |
| +(define_insn "bmi" |
| + [(set (pc) |
| + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_MI) |
| + (label_ref (match_operand 0 "" "")) |
| + (pc)))] |
| + "" |
| + "brmi %0" |
| + [(set_attr "type" "branch") |
| + (set (attr "length") |
| + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254)) |
| + (le (minus (pc) (match_dup 0)) (const_int 256))) |
| + (const_int 2)] ; use compact branch |
| + (const_int 4))) ; use extended branch |
| + (set_attr "cc" "none")]) |
| + |
| +(define_insn "*bmi-reverse" |
| + [(set (pc) |
| + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_MI) |
| + (pc) |
| + (label_ref (match_operand 0 "" ""))))] |
| + "" |
| + "brpl %0" |
| + [(set_attr "type" "branch") |
| + (set (attr "length") |
| + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254)) |
| + (le (minus (pc) (match_dup 0)) (const_int 256))) |
| + (const_int 2)] ; use compact branch |
| + (const_int 4))) ; use extended branch |
| + (set_attr "cc" "none")]) |
| + |
| +; branch if positive |
| +(define_insn "bpl" |
| + [(set (pc) |
| + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_PL) |
| + (label_ref (match_operand 0 "" "")) |
| + (pc)))] |
| + "" |
| + "brpl %0" |
| + [(set_attr "type" "branch") |
| + (set (attr "length") |
| + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254)) |
| + (le (minus (pc) (match_dup 0)) (const_int 256))) |
| + (const_int 2)] ; use compact branch |
| + (const_int 4))) ; use extended branch |
| + (set_attr "cc" "none")]) |
| + |
| +(define_insn "*bpl-reverse" |
| + [(set (pc) |
| + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_PL) |
| + (pc) |
| + (label_ref (match_operand 0 "" ""))))] |
| + "" |
| + "brmi %0" |
| + [(set_attr "type" "branch") |
| + (set (attr "length") |
| + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254)) |
| + (le (minus (pc) (match_dup 0)) (const_int 256))) |
| + (const_int 2)] ; use compact branch |
| + (const_int 4))) ; use extended branch |
| + (set_attr "cc" "none")]) |
| + |
| +; generic conditional branch |
| +(define_insn "b<code>" |
| + [(set (pc) |
| + (if_then_else (any_cond:CC (cc0) |
| + (const_int 0)) |
| + (label_ref (match_operand 0 "" "")) |
| + (pc)))] |
| + "" |
| + "br<cond> %0 " |
| + [(set_attr "type" "branch") |
| + (set (attr "length") |
| + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254)) |
| + (le (minus (pc) (match_dup 0)) (const_int 256))) |
| + (const_int 2)] ; use compact branch |
| + (const_int 4))) ; use extended branch |
| + (set_attr "cc" "none")]) |
| + |
| + |
| +(define_insn "*b<code>-reverse" |
| + [(set (pc) |
| + (if_then_else (any_cond:CC (cc0) |
| + (const_int 0)) |
| + (pc) |
| + (label_ref (match_operand 0 "" ""))))] |
| + "" |
| + "br<invcond> %0 " |
| + [(set_attr "type" "branch") |
| + (set (attr "length") |
| + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254)) |
| + (le (minus (pc) (match_dup 0)) (const_int 256))) |
| + (const_int 2)] ; use compact branch |
| + (const_int 4))) ; use extended branch |
| + (set_attr "cc" "none")]) |
| + |
| + |
| + |
| +;============================================================================= |
| +; Conditional Add/Subtract |
| +;----------------------------------------------------------------------------- |
| +; sub{cond4} Rd, imm |
| +;============================================================================= |
| + |
| + |
| +(define_expand "add<mode>cc" |
| + [(set (match_operand:ADDCC 0 "register_operand" "") |
| + (if_then_else:ADDCC (match_operator 1 "avr32_comparison_operator" |
| + [(match_dup 4) |
| + (match_dup 5)]) |
| + (match_operand:ADDCC 2 "register_operand" "") |
| + (plus:ADDCC |
| + (match_dup 2) |
| + (match_operand:ADDCC 3 "" ""))))] |
| + "" |
| + { |
| + if ( !(GET_CODE (operands[3]) == CONST_INT |
| + || (TARGET_V2_INSNS && REG_P(operands[3]))) ){ |
| + FAIL; |
| + } |
| + |
| + /* Delete compare instruction as it is merged into this instruction */ |
| + remove_insn (get_last_insn_anywhere ()); |
| + |
| + operands[4] = avr32_compare_op0; |
| + operands[5] = avr32_compare_op1; |
| + |
| + if ( TARGET_V2_INSNS |
| + && REG_P(operands[3]) |
| + && REGNO(operands[0]) != REGNO(operands[2]) ){ |
| + emit_move_insn (operands[0], operands[2]); |
| + operands[2] = operands[0]; |
| + } |
| + } |
| + ) |
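| + |
| +;; Illustrative note (not from the original sources): a guarded increment |
| +;; such as |
| +;; |
| +;;   if (a < b) x += 4; |
| +;; |
| +;; can expand through this pattern into a compare followed by a single |
| +;; conditional "sub Rd, -imm" (the add is written as a subtract of the |
| +;; negated immediate), so no branch is needed. |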
| + |
| +(define_insn "add<ADDCC:mode>cc_cmp<CMP:mode>_reg" |
| + [(set (match_operand:ADDCC 0 "register_operand" "=r") |
| + (if_then_else:ADDCC (match_operator 1 "avr32_comparison_operator" |
| + [(match_operand:CMP 4 "register_operand" "r") |
| + (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")]) |
| + (match_dup 0) |
| + (plus:ADDCC |
| + (match_operand:ADDCC 2 "register_operand" "r") |
| + (match_operand:ADDCC 3 "register_operand" "r"))))] |
| + "TARGET_V2_INSNS" |
| + { |
| + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]); |
| + return "add%i1\t%0, %2, %3"; |
| + } |
| + [(set_attr "length" "8") |
| + (set_attr "cc" "cmp_cond_insn")]) |
| + |
| +(define_insn "add<ADDCC:mode>cc_cmp<CMP:mode>" |
| + [(set (match_operand:ADDCC 0 "register_operand" "=r") |
| + (if_then_else:ADDCC (match_operator 1 "avr32_comparison_operator" |
| + [(match_operand:CMP 4 "register_operand" "r") |
| + (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")]) |
| + (match_operand:ADDCC 2 "register_operand" "0") |
| + (plus:ADDCC |
| + (match_dup 2) |
| + (match_operand:ADDCC 3 "avr32_cond_immediate_operand" "Is08"))))] |
| + "" |
| + { |
| + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]); |
| + return "sub%i1\t%0, -%3"; |
| + } |
| + [(set_attr "length" "8") |
| + (set_attr "cc" "cmp_cond_insn")]) |
| + |
| +;============================================================================= |
| +; Conditional Move |
| +;----------------------------------------------------------------------------- |
| +; mov{cond4} Rd, (Rs/imm) |
| +;============================================================================= |
| +(define_expand "mov<mode>cc" |
| + [(set (match_operand:MOVCC 0 "register_operand" "") |
| + (if_then_else:MOVCC (match_operator 1 "avr32_comparison_operator" |
| + [(match_dup 4) |
| + (match_dup 5)]) |
| + (match_operand:MOVCC 2 "avr32_cond_register_immediate_operand" "") |
| + (match_operand:MOVCC 3 "avr32_cond_register_immediate_operand" "")))] |
| + "" |
| + { |
| + /* Delete compare instruction as it is merged into this instruction */ |
| + remove_insn (get_last_insn_anywhere ()); |
| + |
| + operands[4] = avr32_compare_op0; |
| + operands[5] = avr32_compare_op1; |
| + } |
| + ) |
| + |
| + |
| +(define_insn "mov<MOVCC:mode>cc_cmp<CMP:mode>" |
| + [(set (match_operand:MOVCC 0 "register_operand" "=r,r,r") |
| + (if_then_else:MOVCC (match_operator 1 "avr32_comparison_operator" |
| + [(match_operand:CMP 4 "register_operand" "r,r,r") |
| + (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>,<CMP:cmp_constraint>,<CMP:cmp_constraint>")]) |
| + (match_operand:MOVCC 2 "avr32_cond_register_immediate_operand" "0, rKs08,rKs08") |
| + (match_operand:MOVCC 3 "avr32_cond_register_immediate_operand" "rKs08,0,rKs08")))] |
| + "" |
| + { |
| + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]); |
| + |
| + switch( which_alternative ){ |
| + case 0: |
| + return "mov%i1 %0, %3"; |
| + case 1: |
| + return "mov%1 %0, %2"; |
| + case 2: |
| + return "mov%1 %0, %2\;mov%i1 %0, %3"; |
| + default: |
| + abort(); |
| + } |
| + |
| + } |
| + [(set_attr "length" "8,8,12") |
| + (set_attr "cc" "cmp_cond_insn")]) |
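| + |
| +;; Illustrative note (not from the original sources): a C-level select like |
| +;; |
| +;;   x = (a == b) ? c : d; |
| +;; |
| +;; becomes a compare plus conditional moves here: a single mov<cond> when |
| +;; one of the sources already lives in the destination register (the first |
| +;; two alternatives), or a mov<cond>/mov<inverted cond> pair otherwise |
| +;; (the third alternative). |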
| + |
| + |
| + |
| + |
| +;;============================================================================= |
| +;; jump |
| +;;----------------------------------------------------------------------------- |
| +;; Jump inside a function; an unconditional branch to a label. |
| +;;============================================================================= |
| +(define_insn "jump" |
| + [(set (pc) |
| + (label_ref (match_operand 0 "" "")))] |
| + "" |
| + { |
| + if (get_attr_length(insn) > 4) |
| + return "Can't jump this far"; |
| + return (get_attr_length(insn) == 2 ? |
| + "rjmp %0" : "bral %0"); |
| + } |
| + [(set_attr "type" "branch") |
| + (set (attr "length") |
| + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 1022)) |
| + (le (minus (pc) (match_dup 0)) (const_int 1024))) |
| + (const_int 2) ; use rjmp |
| + (le (match_dup 0) (const_int 1048575)) |
| + (const_int 4)] ; use bral |
| + (const_int 8))) ; do something else |
| + (set_attr "cc" "none")]) |
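| + |
| +;; Illustrative note (not from the original sources): the length attribute |
| +;; above selects the encoding from the branch distance; a target within |
| +;; roughly +/-1KB uses the 2-byte "rjmp", anything farther falls back to |
| +;; the 4-byte "bral". |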
| + |
| +;;============================================================================= |
| +;; call |
| +;;----------------------------------------------------------------------------- |
| +;; Subroutine call instruction returning no value. |
| +;;============================================================================= |
| +(define_insn "call_internal" |
| + [(parallel [(call (mem:SI (match_operand:SI 0 "avr32_call_operand" "r,U,T,W")) |
| + (match_operand 1 "" "")) |
| + (clobber (reg:SI LR_REGNUM))])] |
| + "" |
| + { |
| + switch (which_alternative){ |
| + case 0: |
| + return "icall\t%0"; |
| + case 1: |
| + return "rcall\t%0"; |
| + case 2: |
| + return "mcall\t%0"; |
| + case 3: |
| + if ( TARGET_HAS_ASM_ADDR_PSEUDOS ) |
| + return "call\t%0"; |
| + else |
| + return "mcall\tr6[%0@got]"; |
| + default: |
| + abort(); |
| + } |
| + } |
| + [(set_attr "type" "call") |
| + (set_attr "length" "2,4,4,10") |
| + (set_attr "cc" "clobber")]) |
| + |
| + |
| +(define_expand "call" |
| + [(parallel [(call (match_operand:SI 0 "" "") |
| + (match_operand 1 "" "")) |
| + (clobber (reg:SI LR_REGNUM))])] |
| + "" |
| + { |
| + rtx call_address; |
| + if ( GET_CODE(operands[0]) != MEM ) |
| + FAIL; |
| + |
| + call_address = XEXP(operands[0], 0); |
| + |
| + /* If assembler supports call pseudo insn and the call |
| + address is a symbol then nothing special needs to be done. */ |
| + if ( TARGET_HAS_ASM_ADDR_PSEUDOS |
| + && (GET_CODE(call_address) == SYMBOL_REF) ){ |
| + /* We must however mark the function as using the GOT if |
| + flag_pic is set, since the call insn might turn into |
| + a mcall using the GOT ptr register. */ |
| + if ( flag_pic ){ |
| + current_function_uses_pic_offset_table = 1; |
| + emit_call_insn(gen_call_internal(call_address, operands[1])); |
| + DONE; |
| + } |
| + } else { |
| + if ( flag_pic && |
| + GET_CODE(call_address) == SYMBOL_REF ){ |
| + current_function_uses_pic_offset_table = 1; |
| + emit_call_insn(gen_call_internal(call_address, operands[1])); |
| + DONE; |
| + } |
| + |
| + if ( !SYMBOL_REF_RCALL_FUNCTION_P(operands[0]) ){ |
| + if ( optimize_size && |
| + GET_CODE(call_address) == SYMBOL_REF ){ |
| + call_address = force_const_mem(SImode, call_address); |
| + } else { |
| + call_address = force_reg(SImode, call_address); |
| + } |
| + } |
| + } |
| + emit_call_insn(gen_call_internal(call_address, operands[1])); |
| + DONE; |
| + } |
| +) |
| + |
| +;;============================================================================= |
| +;; call_value |
| +;;----------------------------------------------------------------------------- |
| +;; Subroutine call instruction returning a value. |
| +;;============================================================================= |
| +(define_expand "call_value" |
| + [(parallel [(set (match_operand:SI 0 "" "") |
| + (call (match_operand:SI 1 "" "") |
| + (match_operand 2 "" ""))) |
| + (clobber (reg:SI LR_REGNUM))])] |
| + "" |
| + { |
| + rtx call_address; |
| + if ( GET_CODE(operands[1]) != MEM ) |
| + FAIL; |
| + |
| + call_address = XEXP(operands[1], 0); |
| + |
| + /* If assembler supports call pseudo insn and the call |
| + address is a symbol then nothing special needs to be done. */ |
| + if ( TARGET_HAS_ASM_ADDR_PSEUDOS |
| + && (GET_CODE(call_address) == SYMBOL_REF) ){ |
| + /* We must however mark the function as using the GOT if |
| + flag_pic is set, since the call insn might turn into |
| + a mcall using the GOT ptr register. */ |
| + if ( flag_pic ) { |
| + current_function_uses_pic_offset_table = 1; |
| + emit_call_insn(gen_call_value_internal(operands[0], call_address, operands[2])); |
| + DONE; |
| + } |
| + } else { |
| + if ( flag_pic && |
| + GET_CODE(call_address) == SYMBOL_REF ){ |
| + current_function_uses_pic_offset_table = 1; |
| + emit_call_insn(gen_call_value_internal(operands[0], call_address, operands[2])); |
| + DONE; |
| + } |
| + |
| + if ( !SYMBOL_REF_RCALL_FUNCTION_P(operands[1]) ){ |
| + if ( optimize_size && |
| + GET_CODE(call_address) == SYMBOL_REF){ |
| + call_address = force_const_mem(SImode, call_address); |
| + } else { |
| + call_address = force_reg(SImode, call_address); |
| + } |
| + } |
| + } |
| + emit_call_insn(gen_call_value_internal(operands[0], call_address, |
| + operands[2])); |
| + DONE; |
| + |
| + }) |
| + |
| +(define_insn "call_value_internal" |
| + [(parallel [(set (match_operand 0 "register_operand" "=r,r,r,r") |
| + (call (mem:SI (match_operand:SI 1 "avr32_call_operand" "r,U,T,W")) |
| + (match_operand 2 "" ""))) |
| + (clobber (reg:SI LR_REGNUM))])] |
| + ;; Operand 2 not used on the AVR32. |
| + "" |
| + { |
| + switch (which_alternative){ |
| + case 0: |
| + return "icall\t%1"; |
| + case 1: |
| + return "rcall\t%1"; |
| + case 2: |
| + return "mcall\t%1"; |
| + case 3: |
| + if ( TARGET_HAS_ASM_ADDR_PSEUDOS ) |
| + return "call\t%1"; |
| + else |
| + return "mcall\tr6[%1@got]"; |
| + default: |
| + abort(); |
| + } |
| + } |
| + [(set_attr "type" "call") |
| + (set_attr "length" "2,4,4,10") |
| + (set_attr "cc" "call_set")]) |
| + |
| + |
| +;;============================================================================= |
| +;; untyped_call |
| +;;----------------------------------------------------------------------------- |
| +;; Subroutine call instruction returning a value of any type. |
| +;; The code is copied from m68k.md (except gen_blockage is removed) |
| +;; Fixme! |
| +;;============================================================================= |
| +(define_expand "untyped_call" |
| + [(parallel [(call (match_operand 0 "avr32_call_operand" "") |
| + (const_int 0)) |
| + (match_operand 1 "" "") |
| + (match_operand 2 "" "")])] |
| + "" |
| + { |
| + int i; |
| + |
| + emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx)); |
| + |
| + for (i = 0; i < XVECLEN (operands[2], 0); i++) { |
| + rtx set = XVECEXP (operands[2], 0, i); |
| + emit_move_insn (SET_DEST (set), SET_SRC (set)); |
| + } |
| + |
| + /* The optimizer does not know that the call sets the function value |
| + registers we stored in the result block. We avoid problems by |
| + claiming that all hard registers are used and clobbered at this |
| + point. */ |
| + emit_insn (gen_blockage ()); |
| + |
| + DONE; |
| + }) |
| + |
| + |
| +;;============================================================================= |
| +;; return |
| +;;============================================================================= |
| + |
| +(define_insn "return" |
| + [(return)] |
| + "USE_RETURN_INSN (FALSE)" |
| + { |
| + avr32_output_return_instruction(TRUE, FALSE, NULL, NULL); |
| + return ""; |
| + } |
| + [(set_attr "length" "4") |
| + (set_attr "type" "call")] |
| + ) |
| + |
| + |
| +(define_insn "return_cond" |
| + [(set (pc) |
| + (if_then_else (match_operand 0 "avr32_comparison_operand" "") |
| + (return) |
| + (pc)))] |
| + "USE_RETURN_INSN (TRUE)" |
| + "ret%0\tr12"; |
| + [(set_attr "type" "call")]) |
| + |
| +(define_insn "return_cond_predicable" |
| + [(return)] |
| + "USE_RETURN_INSN (TRUE)" |
| + "ret%?\tr12"; |
| + [(set_attr "type" "call") |
| + (set_attr "predicable" "yes")]) |
| + |
| + |
| +(define_insn "return_imm" |
| + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i")) |
| + (use (reg RETVAL_REGNUM)) |
| + (return)])] |
| + "USE_RETURN_INSN (FALSE) && |
| + ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))" |
| + { |
| + avr32_output_return_instruction(TRUE, FALSE, NULL, operands[0]); |
| + return ""; |
| + } |
| + [(set_attr "length" "4") |
| + (set_attr "type" "call")] |
| + ) |
| + |
| +(define_insn "return_imm_cond" |
| + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i")) |
| + (use (reg RETVAL_REGNUM)) |
| + (set (pc) |
| + (if_then_else (match_operand 1 "avr32_comparison_operand" "") |
| + (return) |
| + (pc)))])] |
| + "USE_RETURN_INSN (TRUE) && |
| + ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))" |
| + "ret%1\t%0"; |
| + [(set_attr "type" "call")] |
| + ) |
| + |
| +(define_insn "return_imm_predicable" |
| + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i")) |
| + (use (reg RETVAL_REGNUM)) |
| + (return)])] |
| + "USE_RETURN_INSN (TRUE) && |
| + ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))" |
| + "ret%?\t%0"; |
| + [(set_attr "type" "call") |
| + (set_attr "predicable" "yes")]) |
| + |
| +(define_insn "return_<mode>reg" |
| + [(set (reg RETVAL_REGNUM) (match_operand:MOVM 0 "register_operand" "r")) |
| + (use (reg RETVAL_REGNUM)) |
| + (return)] |
| + "USE_RETURN_INSN (TRUE)" |
| + "ret%?\t%0"; |
| + [(set_attr "type" "call") |
| + (set_attr "predicable" "yes")]) |
| + |
| +(define_insn "return_<mode>reg_cond" |
| + [(set (reg RETVAL_REGNUM) (match_operand:MOVM 0 "register_operand" "r")) |
| + (use (reg RETVAL_REGNUM)) |
| + (set (pc) |
| + (if_then_else (match_operator 1 "avr32_comparison_operator" |
| + [(cc0) (const_int 0)]) |
| + (return) |
| + (pc)))] |
| + "USE_RETURN_INSN (TRUE)" |
| + "ret%1\t%0"; |
| + [(set_attr "type" "call")]) |
| + |
| +;;============================================================================= |
| +;; nop |
| +;;----------------------------------------------------------------------------- |
| +;; No-op instruction. |
| +;;============================================================================= |
| +(define_insn "nop" |
| + [(const_int 0)] |
| + "" |
| + "nop" |
| + [(set_attr "length" "2") |
| + (set_attr "type" "alu") |
| + (set_attr "cc" "none")]) |
| + |
| +;;============================================================================= |
| +;; nonlocal_goto_receiver |
| +;;----------------------------------------------------------------------------- |
| +;; For targets with a return stack we must make sure to flush the return stack |
| +;; since it will be corrupt after a nonlocal goto. |
| +;;============================================================================= |
| +(define_expand "nonlocal_goto_receiver" |
| + [(const_int 0)] |
| + "TARGET_RETURN_STACK" |
| + " |
| + { |
| + emit_insn ( gen_frs() ); |
| + DONE; |
| + } |
| + " |
| + ) |
| + |
| + |
| +;;============================================================================= |
| +;; builtin_setjmp_receiver |
| +;;----------------------------------------------------------------------------- |
| +;; For pic code we need to reload the pic register. |
| +;; For targets with a return stack we must make sure to flush the return stack |
| +;; since it will probably be corrupted. |
| +;;============================================================================= |
| +(define_expand "builtin_setjmp_receiver" |
| + [(label_ref (match_operand 0 "" ""))] |
| + "flag_pic" |
| + " |
| + { |
| + if ( TARGET_RETURN_STACK ) |
| + emit_insn ( gen_frs() ); |
| + |
| + avr32_load_pic_register (); |
| + DONE; |
| + } |
| + " |
| +) |
| + |
| + |
| +;;============================================================================= |
| +;; indirect_jump |
| +;;----------------------------------------------------------------------------- |
| +;; Jump to an address in reg or memory. |
| +;;============================================================================= |
| +(define_expand "indirect_jump" |
| + [(set (pc) |
| + (match_operand:SI 0 "general_operand" ""))] |
| + "" |
| + { |
| + /* One of the ops has to be in a register. */ |
| + if ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS ) |
| + && !avr32_legitimate_pic_operand_p(operands[0]) ) |
| + operands[0] = legitimize_pic_address (operands[0], SImode, 0); |
| + else if ( flag_pic && avr32_address_operand(operands[0], GET_MODE(operands[0])) ) |
| + /* If we have an address operand then this function uses the pic register. */ |
| + current_function_uses_pic_offset_table = 1; |
| + }) |
| + |
| + |
| +(define_insn "indirect_jump_internal" |
| + [(set (pc) |
| + (match_operand:SI 0 "avr32_non_rmw_general_operand" "r,m,W"))] |
| + "" |
| + { |
| + switch( which_alternative ){ |
| + case 0: |
| + return "mov\tpc, %0"; |
| + case 1: |
| + if ( avr32_const_pool_ref_operand(operands[0], GET_MODE(operands[0])) ) |
| + return "lddpc\tpc, %0"; |
| + else |
| + return "ld.w\tpc, %0"; |
| + case 2: |
| + if ( flag_pic ) |
| + return "ld.w\tpc, r6[%0@got]"; |
| + else |
| + return "lda.w\tpc, %0"; |
| + default: |
| + abort(); |
| + } |
| + } |
| + [(set_attr "length" "2,4,8") |
| + (set_attr "type" "call,call,call") |
| + (set_attr "cc" "none,none,clobber")]) |
| + |
| + |
| + |
| +;;============================================================================= |
| +;; casesi and tablejump |
| +;;============================================================================= |
| +(define_insn "tablejump_add" |
| + [(set (pc) |
| + (plus:SI (match_operand:SI 0 "register_operand" "r") |
| + (mult:SI (match_operand:SI 1 "register_operand" "r") |
| + (match_operand:SI 2 "immediate_operand" "Ku04" )))) |
| + (use (label_ref (match_operand 3 "" "")))] |
| + "flag_pic && |
| + ((INTVAL(operands[2]) == 0) || (INTVAL(operands[2]) == 2) || |
| + (INTVAL(operands[2]) == 4) || (INTVAL(operands[2]) == 8))" |
| + "add\tpc, %0, %1 << %p2" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "clobber")]) |
| + |
| +(define_insn "tablejump_insn" |
| + [(set (pc) (match_operand:SI 0 "memory_operand" "m")) |
| + (use (label_ref (match_operand 1 "" "")))] |
| + "!flag_pic" |
| + "ld.w\tpc, %0" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "call") |
| + (set_attr "cc" "none")]) |
| + |
| +(define_expand "casesi" |
| + [(match_operand:SI 0 "register_operand" "") ; index to jump on |
| + (match_operand:SI 1 "const_int_operand" "") ; lower bound |
| + (match_operand:SI 2 "const_int_operand" "") ; total range |
| + (match_operand:SI 3 "" "") ; table label |
| + (match_operand:SI 4 "" "")] ; Out of range label |
| + "" |
| + " |
| + { |
| + rtx reg; |
| + rtx index = operands[0]; |
| + rtx low_bound = operands[1]; |
| + rtx range = operands[2]; |
| + rtx table_label = operands[3]; |
| + rtx oor_label = operands[4]; |
| + |
| + index = force_reg ( SImode, index ); |
| + if (low_bound != const0_rtx) |
| + { |
| + if (!avr32_const_ok_for_constraint_p(INTVAL (low_bound), 'I', \"Is21\")){ |
| + reg = force_reg(SImode, GEN_INT (INTVAL (low_bound))); |
| + emit_insn (gen_subsi3 (reg, index, |
| + reg)); |
| + } else { |
| + reg = gen_reg_rtx (SImode); |
| + emit_insn (gen_addsi3 (reg, index, |
| + GEN_INT (-INTVAL (low_bound)))); |
| + } |
| + index = reg; |
| + } |
| + |
| + if (!avr32_const_ok_for_constraint_p (INTVAL (range), 'K', \"Ks21\")) |
| + range = force_reg (SImode, range); |
| + |
| + emit_cmp_and_jump_insns ( index, range, GTU, NULL_RTX, SImode, 1, oor_label ); |
| + reg = gen_reg_rtx (SImode); |
| + emit_move_insn ( reg, gen_rtx_LABEL_REF (VOIDmode, table_label)); |
| + |
| + if ( flag_pic ) |
| + emit_jump_insn ( gen_tablejump_add ( reg, index, GEN_INT(4), table_label)); |
| + else |
| + emit_jump_insn ( |
| + gen_tablejump_insn ( gen_rtx_MEM ( SImode, |
| + gen_rtx_PLUS ( SImode, |
| + reg, |
| + gen_rtx_MULT ( SImode, |
| + index, |
| + GEN_INT(4)))), |
| + table_label)); |
| + DONE; |
| + }" |
| +) |
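| + |
| +;; Illustrative note (not from the original sources): for a dense C switch |
| +;; the expander above (1) subtracts the lower bound from the index, |
| +;; (2) branches to the default label when the adjusted index exceeds the |
| +;; range, and (3) dispatches with pc = table + index*4, either through |
| +;; tablejump_add ("add pc, Rtable, Rindex << 2") in PIC code, or by loading |
| +;; the absolute entry from the table with tablejump_insn otherwise. |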
| + |
| + |
| + |
| +(define_insn "prefetch" |
| + [(prefetch (match_operand:SI 0 "avr32_ks16_address_operand" "p") |
| + (match_operand 1 "const_int_operand" "") |
| + (match_operand 2 "const_int_operand" ""))] |
| + "" |
| + { |
| + return "pref\t%0"; |
| + } |
| + |
| + [(set_attr "length" "4") |
| + (set_attr "type" "load") |
| + (set_attr "cc" "none")]) |
| + |
| + |
| + |
| +;;============================================================================= |
| +;; prologue |
| +;;----------------------------------------------------------------------------- |
| +;; This pattern, if defined, emits RTL for entry to a function. The function |
| +;; entry is responsible for setting up the stack frame, initializing the frame |
| +;; pointer register, saving callee-saved registers, etc. |
| +;;============================================================================= |
| +(define_expand "prologue" |
| + [(clobber (const_int 0))] |
| + "" |
| + " |
| + avr32_expand_prologue(); |
| + DONE; |
| + " |
| + ) |
| + |
| +;;============================================================================= |
| +;; eh_return |
| +;;----------------------------------------------------------------------------- |
| +;; This pattern, if defined, affects the way __builtin_eh_return, and |
| +;; thence the call frame exception handling library routines, are |
| +;; built. It is intended to handle non-trivial actions needed along |
| +;; the abnormal return path. |
| +;; |
| +;; The address of the exception handler to which the function should |
| +;; return is passed as operand to this pattern. It will normally need |
| +;; to copied by the pattern to some special register or memory |
| +;; location. If the pattern needs to determine the location of the |
| +;; target call frame in order to do so, it may use |
| +;; EH_RETURN_STACKADJ_RTX, if defined; it will have already been |
| +;; assigned. |
| +;; |
| +;; If this pattern is not defined, the default action will be to |
| +;; simply copy the return address to EH_RETURN_HANDLER_RTX. Either |
| +;; that macro or this pattern needs to be defined if call frame |
| +;; exception handling is to be used. |
| + |
| +;; We can't expand this before we know where the link register is stored. |
| +(define_insn_and_split "eh_return" |
| + [(unspec_volatile [(match_operand:SI 0 "register_operand" "r")] |
| + VUNSPEC_EH_RETURN) |
| + (clobber (match_scratch:SI 1 "=&r"))] |
| + "" |
| + "#" |
| + "reload_completed" |
| + [(const_int 0)] |
| + " |
| + { |
| + avr32_set_return_address (operands[0], operands[1]); |
| + DONE; |
| + }" |
| + ) |
| + |
| + |
| +;;============================================================================= |
| +;; ffssi2 |
| +;;----------------------------------------------------------------------------- |
| +(define_insn "ffssi2" |
| + [ (set (match_operand:SI 0 "register_operand" "=r") |
| + (ffs:SI (match_operand:SI 1 "register_operand" "r"))) ] |
| + "" |
| + "mov %0, %1 |
| + brev %0 |
| + clz %0, %0 |
| + sub %0, -1 |
| + cp %0, 33 |
| + moveq %0, 0" |
| + [(set_attr "length" "18") |
| + (set_attr "cc" "clobber")] |
| + ) |
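| + |
| +;; Illustrative note (not from the original sources): ffs(x) is computed as |
| +;; ctz(x) + 1 with a zero-input fixup: "brev" reverses the bits so that |
| +;; "clz" counts the trailing zeros of the original value, "sub %0, -1" |
| +;; adds one, and for x == 0 the intermediate result is 32 + 1 = 33, which |
| +;; the final "cp"/"moveq" pair turns into the required 0. |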
| + |
| + |
| + |
| +;;============================================================================= |
| +;; swap_h |
| +;;----------------------------------------------------------------------------- |
| +(define_insn "*swap_h" |
| + [ (set (match_operand:SI 0 "register_operand" "=r") |
| + (ior:SI (ashift:SI (match_dup 0) (const_int 16)) |
| + (lshiftrt:SI (match_dup 0) (const_int 16))))] |
| + "" |
| + "swap.h %0" |
| + [(set_attr "length" "2")] |
| + ) |
| + |
| +(define_insn_and_split "bswap_16" |
| + [ (set (match_operand:HI 0 "avr32_bswap_operand" "=r,RKs13,r") |
| + (ior:HI (and:HI (lshiftrt:HI (match_operand:HI 1 "avr32_bswap_operand" "r,r,RKs13") |
| + (const_int 8)) |
| + (const_int 255)) |
| + (ashift:HI (and:HI (match_dup 1) |
| + (const_int 255)) |
| + (const_int 8))))] |
| + "" |
| + { |
| + switch ( which_alternative ){ |
| + case 0: |
| + if ( REGNO(operands[0]) == REGNO(operands[1])) |
| + return "swap.bh\t%0"; |
| + else |
| + return "mov\t%0, %1\;swap.bh\t%0"; |
| + case 1: |
| + return "stswp.h\t%0, %1"; |
| + case 2: |
| + return "ldswp.sh\t%0, %1"; |
| + default: |
| + abort(); |
| + } |
| + } |
| + |
| + "(reload_completed && |
| + REG_P(operands[0]) && REG_P(operands[1]) |
| + && (REGNO(operands[0]) != REGNO(operands[1])))" |
| + [(set (match_dup 0) (match_dup 1)) |
| + (set (match_dup 0) |
| + (ior:HI (and:HI (lshiftrt:HI (match_dup 0) |
| + (const_int 8)) |
| + (const_int 255)) |
| + (ashift:HI (and:HI (match_dup 0) |
| + (const_int 255)) |
| + (const_int 8))))] |
| + "" |
| + |
| + [(set_attr "length" "4,4,4") |
| + (set_attr "type" "alu,store,load_rm")] |
| + ) |
| + |
| +(define_insn_and_split "bswap_32" |
| + [ (set (match_operand:SI 0 "avr32_bswap_operand" "=r,RKs14,r") |
| + (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_operand:SI 1 "avr32_bswap_operand" "r,r,RKs14") |
| + (const_int -16777216)) |
| + (const_int 24)) |
| + (lshiftrt:SI (and:SI (match_dup 1) |
| + (const_int 16711680)) |
| + (const_int 8))) |
| + (ior:SI (ashift:SI (and:SI (match_dup 1) |
| + (const_int 65280)) |
| + (const_int 8)) |
| + (ashift:SI (and:SI (match_dup 1) |
| + (const_int 255)) |
| + (const_int 24)))))] |
| + "" |
| + { |
| + switch ( which_alternative ){ |
| + case 0: |
| + if ( REGNO(operands[0]) == REGNO(operands[1])) |
| + return "swap.b\t%0"; |
| + else |
| + return "#"; |
| + case 1: |
| + return "stswp.w\t%0, %1"; |
| + case 2: |
| + return "ldswp.w\t%0, %1"; |
| + default: |
| + abort(); |
| + } |
| + } |
| + "(reload_completed && |
| + REG_P(operands[0]) && REG_P(operands[1]) |
| + && (REGNO(operands[0]) != REGNO(operands[1])))" |
| + [(set (match_dup 0) (match_dup 1)) |
| + (set (match_dup 0) |
| + (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_dup 0) |
| + (const_int -16777216)) |
| + (const_int 24)) |
| + (lshiftrt:SI (and:SI (match_dup 0) |
| + (const_int 16711680)) |
| + (const_int 8))) |
| + (ior:SI (ashift:SI (and:SI (match_dup 0) |
| + (const_int 65280)) |
| + (const_int 8)) |
| + (ashift:SI (and:SI (match_dup 0) |
| + (const_int 255)) |
| + (const_int 24)))))] |
| + "" |
| + |
| + [(set_attr "length" "4,4,4") |
| + (set_attr "type" "alu,store,load_rm")] |
| + ) |
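| + |
| +;; Illustrative note (not from the original sources): "swap.b" only works |
| +;; in place, so when the destination register differs from the source the |
| +;; first alternative returns "#" and the splitter above rewrites the insn, |
| +;; once reload has assigned registers, as a plain move followed by an |
| +;; in-place byte swap. |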
| + |
| + |
| +;;============================================================================= |
| +;; blockage |
| +;;----------------------------------------------------------------------------- |
| +;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and |
| +;; all of memory. This blocks insns from being moved across this point. |
| + |
| +(define_insn "blockage" |
| + [(unspec_volatile [(const_int 0)] VUNSPEC_BLOCKAGE)] |
| + "" |
| + "" |
| + [(set_attr "length" "0")] |
| +) |
| + |
| +;;============================================================================= |
| +;; clzsi2 |
| +;;----------------------------------------------------------------------------- |
| +(define_insn "clzsi2" |
| + [ (set (match_operand:SI 0 "register_operand" "=r") |
| + (clz:SI (match_operand:SI 1 "register_operand" "r"))) ] |
| + "" |
| + "clz %0, %1" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "set_z")] |
| + ) |
| + |
| +;;============================================================================= |
| +;; ctzsi2 |
| +;;----------------------------------------------------------------------------- |
| +(define_insn "ctzsi2" |
| + [ (set (match_operand:SI 0 "register_operand" "=r,r") |
| + (ctz:SI (match_operand:SI 1 "register_operand" "0,r"))) ] |
| + "" |
| + "@ |
| + brev\t%0\;clz\t%0, %0 |
| + mov\t%0, %1\;brev\t%0\;clz\t%0, %0" |
| + [(set_attr "length" "8") |
| + (set_attr "cc" "set_z")] |
| + ) |
| + |
| +;;============================================================================= |
| +;; cache instructions |
| +;;----------------------------------------------------------------------------- |
| +(define_insn "cache" |
| + [ (unspec_volatile [(match_operand:SI 0 "avr32_ks11_address_operand" "p") |
| + (match_operand:SI 1 "immediate_operand" "Ku05")] VUNSPEC_CACHE)] |
| + "" |
| + "cache %0, %1" |
| + [(set_attr "length" "4")] |
| + ) |
| + |
| +(define_insn "sync" |
| + [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku08")] VUNSPEC_SYNC)] |
| + "" |
| + "sync %0" |
| + [(set_attr "length" "4")] |
| + ) |
| + |
| +;;============================================================================= |
| +;; TLB instructions |
| +;;----------------------------------------------------------------------------- |
| +(define_insn "tlbr" |
| + [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBR)] |
| + "" |
| + "tlbr" |
| + [(set_attr "length" "2")] |
| + ) |
| + |
| +(define_insn "tlbw" |
| + [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBW)] |
| + "" |
| + "tlbw" |
| + [(set_attr "length" "2")] |
| + ) |
| + |
| +(define_insn "tlbs" |
| + [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBS)] |
| + "" |
| + "tlbs" |
| + [(set_attr "length" "2")] |
| + ) |
| + |
| +;;============================================================================= |
| +;; Breakpoint instruction |
| +;;----------------------------------------------------------------------------- |
| +(define_insn "breakpoint" |
| + [ (unspec_volatile [(const_int 0)] VUNSPEC_BREAKPOINT)] |
| + "" |
| + "breakpoint" |
| + [(set_attr "length" "2")] |
| + ) |
| + |
| + |
| +;;============================================================================= |
| +;; mtsr/mfsr instruction |
| +;;----------------------------------------------------------------------------- |
| +(define_insn "mtsr" |
| + [ (unspec_volatile [(match_operand 0 "immediate_operand" "i") |
| + (match_operand:SI 1 "register_operand" "r")] VUNSPEC_MTSR)] |
| + "" |
| + "mtsr\t%0, %1" |
| + [(set_attr "length" "4")] |
| + ) |
| + |
| +(define_insn "mfsr" |
| + [ (set (match_operand:SI 0 "register_operand" "=r") |
| + (unspec_volatile:SI [(match_operand 1 "immediate_operand" "i")] VUNSPEC_MFSR)) ] |
| + "" |
| + "mfsr\t%0, %1" |
| + [(set_attr "length" "4")] |
| + ) |
| + |
| +;;============================================================================= |
| +;; mtdr/mfdr instruction |
| +;;----------------------------------------------------------------------------- |
| +(define_insn "mtdr" |
| + [ (unspec_volatile [(match_operand 0 "immediate_operand" "i") |
| + (match_operand:SI 1 "register_operand" "r")] VUNSPEC_MTDR)] |
| + "" |
| + "mtdr\t%0, %1" |
| + [(set_attr "length" "4")] |
| + ) |
| + |
| +(define_insn "mfdr" |
| + [ (set (match_operand:SI 0 "register_operand" "=r") |
| + (unspec_volatile:SI [(match_operand 1 "immediate_operand" "i")] VUNSPEC_MFDR)) ] |
| + "" |
| + "mfdr\t%0, %1" |
| + [(set_attr "length" "4")] |
| + ) |
| + |
| +;;============================================================================= |
| +;; musfr |
| +;;----------------------------------------------------------------------------- |
| +(define_insn "musfr" |
| + [ (unspec_volatile [(match_operand:SI 0 "register_operand" "r")] VUNSPEC_MUSFR)] |
| + "" |
| + "musfr\t%0" |
| + [(set_attr "length" "2") |
| + (set_attr "cc" "clobber")] |
| + ) |
| + |
| +(define_insn "mustr" |
| + [ (set (match_operand:SI 0 "register_operand" "=r") |
| + (unspec_volatile:SI [(const_int 0)] VUNSPEC_MUSTR)) ] |
| + "" |
| + "mustr\t%0" |
| + [(set_attr "length" "2")] |
| + ) |
| + |
| +(define_insn "ssrf" |
| + [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku05")] VUNSPEC_SSRF)] |
| + "" |
| + "ssrf %0" |
| + [(set_attr "length" "2") |
| + (set_attr "cc" "clobber")] |
| + ) |
| + |
| +(define_insn "csrf" |
| + [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku05")] VUNSPEC_CSRF)] |
| + "" |
| + "csrf %0" |
| + [(set_attr "length" "2") |
| + (set_attr "cc" "clobber")] |
| + ) |
| + |
| +;;============================================================================= |
| +;; Flush Return Stack instruction |
| +;;----------------------------------------------------------------------------- |
| +(define_insn "frs" |
| + [ (unspec_volatile [(const_int 0)] VUNSPEC_FRS)] |
| + "" |
| + "frs" |
| + [(set_attr "length" "2") |
| + (set_attr "cc" "none")] |
| + ) |
| + |
| + |
| +;;============================================================================= |
| +;; Saturation Round Scale instruction |
| +;;----------------------------------------------------------------------------- |
| +(define_insn "sats" |
| + [ (set (match_operand:SI 0 "register_operand" "+r") |
| + (unspec:SI [(match_dup 0) |
| + (match_operand 1 "immediate_operand" "Ku05") |
| + (match_operand 2 "immediate_operand" "Ku05")] |
| + UNSPEC_SATS)) ] |
| + "TARGET_DSP" |
| + "sats\t%0 >> %1, %2" |
| + [(set_attr "type" "alu_sat") |
| + (set_attr "length" "4")] |
| + ) |
| + |
| +(define_insn "satu" |
| + [ (set (match_operand:SI 0 "register_operand" "+r") |
| + (unspec:SI [(match_dup 0) |
| + (match_operand 1 "immediate_operand" "Ku05") |
| + (match_operand 2 "immediate_operand" "Ku05")] |
| + UNSPEC_SATU)) ] |
| + "TARGET_DSP" |
| + "satu\t%0 >> %1, %2" |
| + [(set_attr "type" "alu_sat") |
| + (set_attr "length" "4")] |
| + ) |
| + |
| +(define_insn "satrnds" |
| + [ (set (match_operand:SI 0 "register_operand" "+r") |
| + (unspec:SI [(match_dup 0) |
| + (match_operand 1 "immediate_operand" "Ku05") |
| + (match_operand 2 "immediate_operand" "Ku05")] |
| + UNSPEC_SATRNDS)) ] |
| + "TARGET_DSP" |
| + "satrnds\t%0 >> %1, %2" |
| + [(set_attr "type" "alu_sat") |
| + (set_attr "length" "4")] |
| + ) |
| + |
| +(define_insn "satrndu" |
| + [ (set (match_operand:SI 0 "register_operand" "+r") |
| + (unspec:SI [(match_dup 0) |
| + (match_operand 1 "immediate_operand" "Ku05") |
| + (match_operand 2 "immediate_operand" "Ku05")] |
| + UNSPEC_SATRNDU)) ] |
| + "TARGET_DSP" |
| + "sats\t%0 >> %1, %2" |
| + [(set_attr "type" "alu_sat") |
| + (set_attr "length" "4")] |
| + ) |
| + |
| +;; Special patterns for dealing with the constant pool |
| + |
| +(define_insn "align_4" |
| + [(unspec_volatile [(const_int 0)] VUNSPEC_ALIGN)] |
| + "" |
| + { |
| + assemble_align (32); |
| + return ""; |
| + } |
| + [(set_attr "length" "2")] |
| +) |
| + |
| +(define_insn "consttable_start" |
| + [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_START)] |
| + "" |
| + { |
| + return ".cpool"; |
| + } |
| + [(set_attr "length" "0")] |
| + ) |
| + |
| +(define_insn "consttable_end" |
| + [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_END)] |
| + "" |
| + { |
| + making_const_table = FALSE; |
| + return ""; |
| + } |
| + [(set_attr "length" "0")] |
| +) |
| + |
| + |
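| +;; consttable_4 and consttable_8 emit one constant-pool entry of the given |
| +;; size: floating-point values are written with .float/.double, anything |
| +;; else through assemble_integer. consttable_16 always uses assemble_integer. |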
| +(define_insn "consttable_4" |
| + [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_4)] |
| + "" |
| + { |
| + making_const_table = TRUE; |
| + switch (GET_MODE_CLASS (GET_MODE (operands[0]))) |
| + { |
| + case MODE_FLOAT: |
| + { |
| + REAL_VALUE_TYPE r; |
| + char real_string[1024]; |
| + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]); |
| + real_to_decimal(real_string, &r, 1024, 0, 1); |
| + asm_fprintf (asm_out_file, "\t.float\t%s\n", real_string); |
| + break; |
| + } |
| + default: |
| + assemble_integer (operands[0], 4, 0, 1); |
| + break; |
| + } |
| + return ""; |
| + } |
| + [(set_attr "length" "4")] |
| +) |
| + |
| +(define_insn "consttable_8" |
| + [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_8)] |
| + "" |
| + { |
| + making_const_table = TRUE; |
| + switch (GET_MODE_CLASS (GET_MODE (operands[0]))) |
| + { |
| + case MODE_FLOAT: |
| + { |
| + REAL_VALUE_TYPE r; |
| + char real_string[1024]; |
| + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]); |
| + real_to_decimal(real_string, &r, 1024, 0, 1); |
| + asm_fprintf (asm_out_file, "\t.double\t%s\n", real_string); |
| + break; |
| + } |
| + default: |
| + assemble_integer(operands[0], 8, 0, 1); |
| + break; |
| + } |
| + return ""; |
| + } |
| + [(set_attr "length" "8")] |
| +) |
| + |
| +(define_insn "consttable_16" |
| + [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_16)] |
| + "" |
| + { |
| + making_const_table = TRUE; |
| + assemble_integer(operands[0], 16, 0, 1); |
| + return ""; |
| + } |
| + [(set_attr "length" "16")] |
| +) |
| + |
| +;;============================================================================= |
| +;; coprocessor instructions |
| +;;----------------------------------------------------------------------------- |
| +(define_insn "cop" |
| + [ (unspec_volatile [(match_operand 0 "immediate_operand" "Ku03") |
| + (match_operand 1 "immediate_operand" "Ku04") |
| + (match_operand 2 "immediate_operand" "Ku04") |
| + (match_operand 3 "immediate_operand" "Ku04") |
| + (match_operand 4 "immediate_operand" "Ku07")] VUNSPEC_COP)] |
| + "" |
| + "cop\tcp%0, cr%1, cr%2, cr%3, %4" |
| + [(set_attr "length" "4")] |
| + ) |
| + |
| +(define_insn "mvcrsi" |
| + [ (set (match_operand:SI 0 "avr32_cop_move_operand" "=r,<,Z") |
| + (unspec_volatile:SI [(match_operand 1 "immediate_operand" "Ku03,Ku03,Ku03") |
| + (match_operand 2 "immediate_operand" "Ku04,Ku04,Ku04")] |
| + VUNSPEC_MVCR)) ] |
| + "" |
| + "@ |
| + mvcr.w\tcp%1, %0, cr%2 |
| + stcm.w\tcp%1, %0, cr%2 |
| + stc.w\tcp%1, %0, cr%2" |
| + [(set_attr "length" "4")] |
| + ) |
| + |
| +(define_insn "mvcrdi" |
| + [ (set (match_operand:DI 0 "avr32_cop_move_operand" "=r,<,Z") |
| + (unspec_volatile:DI [(match_operand 1 "immediate_operand" "Ku03,Ku03,Ku03") |
| + (match_operand 2 "immediate_operand" "Ku04,Ku04,Ku04")] |
| + VUNSPEC_MVCR)) ] |
| + "" |
| + "@ |
| + mvcr.d\tcp%1, %0, cr%2 |
| + stcm.d\tcp%1, %0, cr%2-cr%i2 |
| + stc.d\tcp%1, %0, cr%2" |
| + [(set_attr "length" "4")] |
| + ) |
| + |
| +(define_insn "mvrcsi" |
| + [ (unspec_volatile:SI [(match_operand 0 "immediate_operand" "Ku03,Ku03,Ku03") |
| + (match_operand 1 "immediate_operand" "Ku04,Ku04,Ku04") |
| + (match_operand:SI 2 "avr32_cop_move_operand" "r,>,Z")] |
| + VUNSPEC_MVRC)] |
| + "" |
| + { |
| + switch (which_alternative){ |
| + case 0: |
| + return "mvrc.w\tcp%0, cr%1, %2"; |
| + case 1: |
| + return "ldcm.w\tcp%0, %2, cr%1"; |
| + case 2: |
| + return "ldc.w\tcp%0, cr%1, %2"; |
| + default: |
| + abort(); |
| + } |
| + } |
| + [(set_attr "length" "4")] |
| + ) |
| + |
| +(define_insn "mvrcdi" |
| + [ (unspec_volatile:DI [(match_operand 0 "immediate_operand" "Ku03,Ku03,Ku03") |
| + (match_operand 1 "immediate_operand" "Ku04,Ku04,Ku04") |
| + (match_operand:DI 2 "avr32_cop_move_operand" "r,>,Z")] |
| + VUNSPEC_MVRC)] |
| + "" |
| + { |
| + switch (which_alternative){ |
| + case 0: |
| + return "mvrc.d\tcp%0, cr%1, %2"; |
| + case 1: |
| + return "ldcm.d\tcp%0, %2, cr%1-cr%i1"; |
| + case 2: |
| + return "ldc.d\tcp%0, cr%1, %2"; |
| + default: |
| + abort(); |
| + } |
| + } |
| + [(set_attr "length" "4")] |
| + ) |
| + |
| +;;============================================================================= |
| +;; epilogue |
| +;;----------------------------------------------------------------------------- |
| +;; This pattern emits RTL for exit from a function. The function exit is |
| +;; responsible for deallocating the stack frame, restoring callee-saved |
| +;; registers and emitting the return instruction. |
| +;; ToDo: use TARGET_ASM_FUNCTION_PROLOGUE instead. |
| +;;============================================================================= |
| +(define_expand "epilogue" |
| + [(unspec_volatile [(return)] VUNSPEC_EPILOGUE)] |
| + "" |
| + " |
| + if (USE_RETURN_INSN (FALSE)){ |
| + emit_jump_insn (gen_return ()); |
| + DONE; |
| + } |
| + emit_jump_insn (gen_rtx_UNSPEC_VOLATILE (VOIDmode, |
| + gen_rtvec (1, |
| + gen_rtx_RETURN (VOIDmode)), |
| + VUNSPEC_EPILOGUE)); |
| + DONE; |
| + " |
| + ) |
| + |
| +(define_insn "*epilogue_insns" |
| + [(unspec_volatile [(return)] VUNSPEC_EPILOGUE)] |
| + "" |
| + { |
| + avr32_output_return_instruction (FALSE, FALSE, NULL, NULL); |
| + return ""; |
| + } |
| + ; Length is absolute worst case |
| + [(set_attr "type" "branch") |
| + (set_attr "length" "12")] |
| + ) |
| + |
| +(define_insn "*epilogue_insns_ret_imm" |
| + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i")) |
| + (use (reg RETVAL_REGNUM)) |
| + (unspec_volatile [(return)] VUNSPEC_EPILOGUE)])] |
| + "((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))" |
| + { |
| + avr32_output_return_instruction (FALSE, FALSE, NULL, operands[0]); |
| + return ""; |
| + } |
| + ; Length is absolute worst case |
| + [(set_attr "type" "branch") |
| + (set_attr "length" "12")] |
| + ) |
| + |
| +(define_insn "sibcall_epilogue" |
| + [(unspec_volatile [(const_int 0)] VUNSPEC_EPILOGUE)] |
| + "" |
| + { |
| + avr32_output_return_instruction (FALSE, FALSE, NULL, NULL); |
| + return ""; |
| + } |
| +;; Length is absolute worst case |
| + [(set_attr "type" "branch") |
| + (set_attr "length" "12")] |
| + ) |
| + |
| +(define_insn "*sibcall_epilogue_insns_ret_imm" |
| + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i")) |
| + (use (reg RETVAL_REGNUM)) |
| + (unspec_volatile [(const_int 0)] VUNSPEC_EPILOGUE)])] |
| + "((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))" |
| + { |
| + avr32_output_return_instruction (FALSE, FALSE, NULL, operands[0]); |
| + return ""; |
| + } |
| + ; Length is absolute worst case |
| + [(set_attr "type" "branch") |
| + (set_attr "length" "12")] |
| + ) |
| + |
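| +;;============================================================================= |
| +;; Load with extracted byte index |
| +;;----------------------------------------------------------------------------- |
| +;; Matches a word load whose index is one byte extracted from a register and |
| +;; scaled by 4; the bit offset of the extracted byte selects the :b/:l/:u/:t |
| +;; part used in the emitted ld.w. |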
| +(define_insn "ldxi" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (mem:SI (plus:SI |
| + (match_operand:SI 1 "register_operand" "r") |
| + (mult:SI (zero_extract:SI (match_operand:SI 2 "register_operand" "r") |
| + (const_int 8) |
| + (match_operand:SI 3 "immediate_operand" "Ku05")) |
| + (const_int 4)))))] |
| + "(INTVAL(operands[3]) == 24 || INTVAL(operands[3]) == 16 || INTVAL(operands[3]) == 8 |
| + || INTVAL(operands[3]) == 0)" |
| + { |
| + switch ( INTVAL(operands[3]) ){ |
| + case 0: |
| + return "ld.w %0, %1[%2:b << 2]"; |
| + case 8: |
| + return "ld.w %0, %1[%2:l << 2]"; |
| + case 16: |
| + return "ld.w %0, %1[%2:u << 2]"; |
| + case 24: |
| + return "ld.w %0, %1[%2:t << 2]"; |
| + default: |
| + internal_error("illegal operand for ldxi"); |
| + } |
| + } |
| + [(set_attr "type" "load") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "none")]) |
| + |
| + |
| + |
| + |
| + |
| + |
| +;;============================================================================= |
| +;; Peephole optimizing |
| +;;----------------------------------------------------------------------------- |
| +;; Changing |
| +;; sub r8, r7, 8 |
| +;; st.w r8[0x0], r12 |
| +;; to |
| +;; sub r8, r7, 8 |
| +;; st.w r7[-0x8], r12 |
| +;;============================================================================= |
| +; (set (reg:SI 9 r8) |
| +; (plus:SI (reg/f:SI 6 r7) |
| +; (const_int ...))) |
| +; (set (mem:SI (reg:SI 9 r8)) |
| +; (reg:SI 12 r12)) |
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (plus:SI (match_operand:SI 1 "register_operand" "") |
| + (match_operand:SI 2 "immediate_operand" ""))) |
| + (set (mem:SI (match_dup 0)) |
| + (match_operand:SI 3 "register_operand" ""))] |
| + "REGNO(operands[0]) != REGNO(operands[1]) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'K', \"Ks16\")" |
| + [(set (match_dup 0) |
| + (plus:SI (match_dup 1) |
| + (match_dup 2))) |
| + (set (mem:SI (plus:SI (match_dup 1) |
| + (match_dup 2))) |
| + (match_dup 3))] |
| + "") |
| + |
| +;;============================================================================= |
| +;; Peephole optimizing |
| +;;----------------------------------------------------------------------------- |
| +;; Changing |
| +;; sub r6, r7, 4 |
| +;; ld.w r6, r6[0x0] |
| +;; to |
| +;; sub r6, r7, 4 |
| +;; ld.w r6, r7[-0x4] |
| +;;============================================================================= |
| +; (set (reg:SI 7 r6) |
| +; (plus:SI (reg/f:SI 6 r7) |
| +; (const_int -4 [0xfffffffc]))) |
| +; (set (reg:SI 7 r6) |
| +; (mem:SI (reg:SI 7 r6))) |
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (plus:SI (match_operand:SI 1 "register_operand" "") |
| + (match_operand:SI 2 "immediate_operand" ""))) |
| + (set (match_operand:SI 3 "register_operand" "") |
| + (mem:SI (match_dup 0)))] |
| + "REGNO(operands[0]) != REGNO(operands[1]) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'K', \"Ks16\")" |
| + [(set (match_dup 0) |
| + (plus:SI (match_dup 1) |
| + (match_dup 2))) |
| + (set (match_dup 3) |
| + (mem:SI (plus:SI (match_dup 1) |
| + (match_dup 2))))] |
| + "") |
| + |
| +;;============================================================================= |
| +;; Peephole optimizing |
| +;;----------------------------------------------------------------------------- |
| +;; Changing |
| +;; ld.sb r0, r7[-0x6] |
| +;; cashs.b r0 |
| +;; to |
| +;; ld.sb r0, r7[-0x6] |
| +;;============================================================================= |
| +(define_peephole2 |
| + [(set (match_operand:QI 0 "register_operand" "") |
| + (match_operand:QI 1 "load_sb_memory_operand" "")) |
| + (set (match_operand:SI 2 "register_operand" "") |
| + (sign_extend:SI (match_dup 0)))] |
| + "(REGNO(operands[0]) == REGNO(operands[2]) || peep2_reg_dead_p(2, operands[0]))" |
| + [(set (match_dup 2) |
| + (sign_extend:SI (match_dup 1)))] |
| + "") |
| + |
| +;;============================================================================= |
| +;; Peephole optimizing |
| +;;----------------------------------------------------------------------------- |
| +;; Changing |
| +;; ld.ub r0, r7[-0x6] |
| +;; cashu.b r0 |
| +;; to |
| +;; ld.ub r0, r7[-0x6] |
| +;;============================================================================= |
| +(define_peephole2 |
| + [(set (match_operand:QI 0 "register_operand" "") |
| + (match_operand:QI 1 "memory_operand" "")) |
| + (set (match_operand:SI 2 "register_operand" "") |
| + (zero_extend:SI (match_dup 0)))] |
| + "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])" |
| + [(set (match_dup 2) |
| + (zero_extend:SI (match_dup 1)))] |
| + "") |
| + |
| +;;============================================================================= |
| +;; Peephole optimizing |
| +;;----------------------------------------------------------------------------- |
| +;; Changing |
| +;; ld.sh r0, r7[-0x6] |
| +;; casts.h r0 |
| +;; to |
| +;; ld.sh r0, r7[-0x6] |
| +;;============================================================================= |
| +(define_peephole2 |
| + [(set (match_operand:HI 0 "register_operand" "") |
| + (match_operand:HI 1 "memory_operand" "")) |
| + (set (match_operand:SI 2 "register_operand" "") |
| + (sign_extend:SI (match_dup 0)))] |
| + "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])" |
| + [(set (match_dup 2) |
| + (sign_extend:SI (match_dup 1)))] |
| + "") |
| + |
| +;;============================================================================= |
| +;; Peephole optimizing |
| +;;----------------------------------------------------------------------------- |
| +;; Changing |
| +;; ld.uh r0, r7[-0x6] |
| +;; castu.h r0 |
| +;; to |
| +;; ld.uh r0, r7[-0x6] |
| +;;============================================================================= |
| +(define_peephole2 |
| + [(set (match_operand:HI 0 "register_operand" "") |
| + (match_operand:HI 1 "memory_operand" "")) |
| + (set (match_operand:SI 2 "register_operand" "") |
| + (zero_extend:SI (match_dup 0)))] |
| + "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])" |
| + [(set (match_dup 2) |
| + (zero_extend:SI (match_dup 1)))] |
| + "") |
| + |
| +;;============================================================================= |
| +;; Peephole optimizing |
| +;;----------------------------------------------------------------------------- |
| +;; Changing |
| +;; mul rd, rx, ry |
| +;; add rd2, rd |
| +;; or |
| +;; add rd2, rd, rd2 |
| +;; to |
| +;; mac rd2, rx, ry |
| +;;============================================================================= |
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (mult:SI (match_operand:SI 1 "register_operand" "") |
| + (match_operand:SI 2 "register_operand" ""))) |
| + (set (match_operand:SI 3 "register_operand" "") |
| + (plus:SI (match_dup 3) |
| + (match_dup 0)))] |
| + "peep2_reg_dead_p(2, operands[0])" |
| + [(set (match_dup 3) |
| + (plus:SI (mult:SI (match_dup 1) |
| + (match_dup 2)) |
| + (match_dup 3)))] |
| + "") |
| + |
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (mult:SI (match_operand:SI 1 "register_operand" "") |
| + (match_operand:SI 2 "register_operand" ""))) |
| + (set (match_operand:SI 3 "register_operand" "") |
| + (plus:SI (match_dup 0) |
| + (match_dup 3)))] |
| + "peep2_reg_dead_p(2, operands[0])" |
| + [(set (match_dup 3) |
| + (plus:SI (mult:SI (match_dup 1) |
| + (match_dup 2)) |
| + (match_dup 3)))] |
| + "") |
| + |
| + |
| +;;============================================================================= |
| +;; Peephole optimizing |
| +;;----------------------------------------------------------------------------- |
| +;; Changing |
| +;; bfextu rd, rs, k5, 1 or and(h/l) rd, one_bit_set_mask |
| +;; to |
| +;; bld rs, k5 |
| +;; |
| +;; If rd is dead after the operation. |
| +;;============================================================================= |
| +(define_peephole2 |
| + [ (set (match_operand:SI 0 "register_operand" "") |
| + (zero_extract:SI (match_operand:SI 1 "register_operand" "") |
| + (const_int 1) |
| + (match_operand:SI 2 "immediate_operand" ""))) |
| + (set (cc0) |
| + (match_dup 0))] |
| + "peep2_reg_dead_p(2, operands[0])" |
| + [(set (cc0) |
| + (and:SI (match_dup 1) |
| + (match_dup 2)))] |
| + "operands[2] = GEN_INT(1 << INTVAL(operands[2]));") |
| + |
| +(define_peephole2 |
| + [ (set (match_operand:SI 0 "register_operand" "") |
| + (and:SI (match_operand:SI 1 "register_operand" "") |
| + (match_operand:SI 2 "one_bit_set_operand" ""))) |
| + (set (cc0) |
| + (match_dup 0))] |
| + "peep2_reg_dead_p(2, operands[0])" |
| + [(set (cc0) |
| + (and:SI (match_dup 1) |
| + (match_dup 2)))] |
| + "") |
| + |
| +;;============================================================================= |
| +;; Peephole optimizing |
| +;;----------------------------------------------------------------------------- |
| +;; Load with extracted index: ld.w Rd, Rb[Ri:{t/u/b/l} << 2] |
| +;; |
| +;;============================================================================= |
| + |
| + |
| +(define_peephole |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (zero_extract:SI (match_operand:SI 1 "register_operand" "") |
| + (const_int 8) |
| + (match_operand:SI 2 "avr32_extract_shift_operand" ""))) |
| + (set (match_operand:SI 3 "register_operand" "") |
| + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4)) |
| + (match_operand:SI 4 "register_operand" ""))))] |
| + |
| + "(dead_or_set_p(insn, operands[0]))" |
| + { |
| + switch ( INTVAL(operands[2]) ){ |
| + case 0: |
| + return "ld.w %3, %4[%1:b << 2]"; |
| + case 8: |
| + return "ld.w %3, %4[%1:l << 2]"; |
| + case 16: |
| + return "ld.w %3, %4[%1:u << 2]"; |
| + case 24: |
| + return "ld.w %3, %4[%1:t << 2]"; |
| + default: |
| + internal_error("illegal operand for ldxi"); |
| + } |
| + } |
| + [(set_attr "type" "load") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "clobber")] |
| + ) |
| + |
| + |
| + |
| +(define_peephole |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (and:SI (match_operand:SI 1 "register_operand" "") (const_int 255))) |
| + (set (match_operand:SI 2 "register_operand" "") |
| + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4)) |
| + (match_operand:SI 3 "register_operand" ""))))] |
| + |
| + "(dead_or_set_p(insn, operands[0]))" |
| + |
| + "ld.w %2, %3[%1:b << 2]" |
| + [(set_attr "type" "load") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "clobber")] |
| + ) |
| + |
| + |
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (zero_extract:SI (match_operand:SI 1 "register_operand" "") |
| + (const_int 8) |
| + (match_operand:SI 2 "avr32_extract_shift_operand" ""))) |
| + (set (match_operand:SI 3 "register_operand" "") |
| + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4)) |
| + (match_operand:SI 4 "register_operand" ""))))] |
| + |
| + "(peep2_reg_dead_p(2, operands[0])) |
| + || (REGNO(operands[0]) == REGNO(operands[3]))" |
| + [(set (match_dup 3) |
| + (mem:SI (plus:SI |
| + (match_dup 4) |
| + (mult:SI (zero_extract:SI (match_dup 1) |
| + (const_int 8) |
| + (match_dup 2)) |
| + (const_int 4)))))] |
| + ) |
| + |
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (zero_extend:SI (match_operand:QI 1 "register_operand" ""))) |
| + (set (match_operand:SI 2 "register_operand" "") |
| + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4)) |
| + (match_operand:SI 3 "register_operand" ""))))] |
| + |
| + "(peep2_reg_dead_p(2, operands[0])) |
| + || (REGNO(operands[0]) == REGNO(operands[2]))" |
| + [(set (match_dup 2) |
| + (mem:SI (plus:SI |
| + (match_dup 3) |
| + (mult:SI (zero_extract:SI (match_dup 1) |
| + (const_int 8) |
| + (const_int 0)) |
| + (const_int 4)))))] |
| + "operands[1] = gen_rtx_REG(SImode, REGNO(operands[1]));" |
| + ) |
| + |
| + |
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (and:SI (match_operand:SI 1 "register_operand" "") |
| + (const_int 255))) |
| + (set (match_operand:SI 2 "register_operand" "") |
| + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4)) |
| + (match_operand:SI 3 "register_operand" ""))))] |
| + |
| + "(peep2_reg_dead_p(2, operands[0])) |
| + || (REGNO(operands[0]) == REGNO(operands[2]))" |
| + [(set (match_dup 2) |
| + (mem:SI (plus:SI |
| + (match_dup 3) |
| + (mult:SI (zero_extract:SI (match_dup 1) |
| + (const_int 8) |
| + (const_int 0)) |
| + (const_int 4)))))] |
| + "" |
| + ) |
| + |
| + |
| + |
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (lshiftrt:SI (match_operand:SI 1 "register_operand" "") |
| + (const_int 24))) |
| + (set (match_operand:SI 2 "register_operand" "") |
| + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4)) |
| + (match_operand:SI 3 "register_operand" ""))))] |
| + |
| + "(peep2_reg_dead_p(2, operands[0])) |
| + || (REGNO(operands[0]) == REGNO(operands[2]))" |
| + [(set (match_dup 2) |
| + (mem:SI (plus:SI |
| + (match_dup 3) |
| + (mult:SI (zero_extract:SI (match_dup 1) |
| + (const_int 8) |
| + (const_int 24)) |
| + (const_int 4)))))] |
| + "" |
| + ) |
| + |
| + |
| +;;************************************************ |
| +;; ANDN |
| +;; |
| +;;************************************************ |
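| +;; Rewrite a NOT followed by an AND with the inverted value into a single |
| +;; and-not RTL expression (the ANDN instruction), provided the intermediate |
| +;; register is dead afterwards. |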
| + |
| + |
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (not:SI (match_operand:SI 1 "register_operand" ""))) |
| + (set (match_operand:SI 2 "register_operand" "") |
| + (and:SI (match_dup 2) |
| + (match_dup 0)))] |
| + "peep2_reg_dead_p(2, operands[0])" |
| + |
| + [(set (match_dup 2) |
| + (and:SI (match_dup 2) |
| + (not:SI (match_dup 1)) |
| + ))] |
| + "" |
| +) |
| + |
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (not:SI (match_operand:SI 1 "register_operand" ""))) |
| + (set (match_operand:SI 2 "register_operand" "") |
| + (and:SI (match_dup 0) |
| + (match_dup 2) |
| + ))] |
| + "peep2_reg_dead_p(2, operands[0])" |
| + |
| + [(set (match_dup 2) |
| + (and:SI (match_dup 2) |
| + (not:SI (match_dup 1)) |
| + ))] |
| + |
| + "" |
| +) |
| + |
| + |
| +;;================================================================= |
| +;; Addabs peephole |
| +;;================================================================= |
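| +;; Combine an abs followed by an add of the absolute value into a single |
| +;; addabs instruction when the intermediate result is no longer needed. |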
| + |
| +(define_peephole |
| + [(set (match_operand:SI 2 "register_operand" "=r") |
| + (abs:SI (match_operand:SI 1 "register_operand" "r"))) |
| + (set (match_operand:SI 0 "register_operand" "=r") |
| + (plus:SI (match_operand:SI 3 "register_operand" "r") |
| + (match_dup 2)))] |
| + "dead_or_set_p(insn, operands[2])" |
| + "addabs %0, %3, %1" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "set_z")]) |
| + |
| +(define_peephole |
| + [(set (match_operand:SI 2 "register_operand" "=r") |
| + (abs:SI (match_operand:SI 1 "register_operand" "r"))) |
| + (set (match_operand:SI 0 "register_operand" "=r") |
| + (plus:SI (match_dup 2) |
| + (match_operand:SI 3 "register_operand" "r")))] |
| + "dead_or_set_p(insn, operands[2])" |
| + "addabs %0, %3, %1" |
| + [(set_attr "length" "4") |
| + (set_attr "cc" "set_z")]) |
| + |
| + |
| +;;================================================================= |
| +;; Detect roundings |
| +;;================================================================= |
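| +;; An add of a rounding constant followed by an arithmetic right shift |
| +;; (validated by avr32_rnd_operands) is collapsed into a single satrnds. |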
| + |
| +(define_insn "*round" |
| + [(set (match_operand:SI 0 "register_operand" "+r") |
| + (ashiftrt:SI (plus:SI (match_dup 0) |
| + (match_operand:SI 1 "immediate_operand" "i")) |
| + (match_operand:SI 2 "immediate_operand" "i")))] |
| + "avr32_rnd_operands(operands[1], operands[2])" |
| + |
| + "satrnds %0 >> %2, 31" |
| + |
| + [(set_attr "type" "alu_sat") |
| + (set_attr "length" "4")] |
| + |
| + ) |
| + |
| + |
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (plus:SI (match_dup 0) |
| + (match_operand:SI 1 "immediate_operand" ""))) |
| + (set (match_dup 0) |
| + (ashiftrt:SI (match_dup 0) |
| + (match_operand:SI 2 "immediate_operand" "")))] |
| + "avr32_rnd_operands(operands[1], operands[2])" |
| + |
| + [(set (match_dup 0) |
| + (ashiftrt:SI (plus:SI (match_dup 0) |
| + (match_dup 1)) |
| + (match_dup 2)))] |
| + ) |
| + |
| +(define_peephole |
| + [(set (match_operand:SI 0 "register_operand" "r") |
| + (plus:SI (match_dup 0) |
| + (match_operand:SI 1 "immediate_operand" "i"))) |
| + (set (match_dup 0) |
| + (ashiftrt:SI (match_dup 0) |
| + (match_operand:SI 2 "immediate_operand" "i")))] |
| + "avr32_rnd_operands(operands[1], operands[2])" |
| + |
| + "satrnds %0 >> %2, 31" |
| + |
| + [(set_attr "type" "alu_sat") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "clobber")] |
| + |
| + ) |
| + |
| + |
| +;;================================================================= |
| +;; mcall |
| +;;================================================================= |
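| +;; Fold a constant-pool load of a function address followed by an indirect |
| +;; call through that register into a single mcall on the constant-pool |
| +;; reference, when the address register is not needed afterwards. |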
| +(define_peephole |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (match_operand 1 "avr32_const_pool_ref_operand" "")) |
| + (parallel [(call (mem:SI (match_dup 0)) |
| + (match_operand 2 "" "")) |
| + (clobber (reg:SI LR_REGNUM))])] |
| + "dead_or_set_p(insn, operands[0])" |
| + "mcall %1" |
| + [(set_attr "type" "call") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "clobber")] |
| +) |
| + |
| +(define_peephole |
| + [(set (match_operand:SI 2 "register_operand" "") |
| + (match_operand 1 "avr32_const_pool_ref_operand" "")) |
| + (parallel [(set (match_operand 0 "register_operand" "") |
| + (call (mem:SI (match_dup 2)) |
| + (match_operand 3 "" ""))) |
| + (clobber (reg:SI LR_REGNUM))])] |
| + "dead_or_set_p(insn, operands[2])" |
| + "mcall %1" |
| + [(set_attr "type" "call") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "call_set")] |
| +) |
| + |
| + |
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (match_operand 1 "avr32_const_pool_ref_operand" "")) |
| + (parallel [(call (mem:SI (match_dup 0)) |
| + (match_operand 2 "" "")) |
| + (clobber (reg:SI LR_REGNUM))])] |
| + "peep2_reg_dead_p(2, operands[0])" |
| + [(parallel [(call (mem:SI (match_dup 1)) |
| + (match_dup 2)) |
| + (clobber (reg:SI LR_REGNUM))])] |
| + "" |
| +) |
| + |
| +(define_peephole2 |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (match_operand 1 "avr32_const_pool_ref_operand" "")) |
| + (parallel [(set (match_operand 2 "register_operand" "") |
| + (call (mem:SI (match_dup 0)) |
| + (match_operand 3 "" ""))) |
| + (clobber (reg:SI LR_REGNUM))])] |
| + "(peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[2]) == REGNO(operands[0])))" |
| + [(parallel [(set (match_dup 2) |
| + (call (mem:SI (match_dup 1)) |
| + (match_dup 3))) |
| + (clobber (reg:SI LR_REGNUM))])] |
| + "" |
| +) |
| + |
| +;;================================================================= |
| +;; Returning a value |
| +;;================================================================= |
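| +;; Merge the final move into the return-value register with the following |
| +;; return, so the value can be delivered directly by retal or by a |
| +;; conditional return sequence. |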
| + |
| + |
| +(define_peephole |
| + [(set (match_operand 0 "register_operand" "") |
| + (match_operand 1 "register_operand" "")) |
| + (return)] |
| + "USE_RETURN_INSN (TRUE) && (REGNO(operands[0]) == RETVAL_REGNUM) |
| + && (REGNO(operands[1]) != LR_REGNUM) |
| + && (REGNO_REG_CLASS(REGNO(operands[1])) == GENERAL_REGS)" |
| + "retal %1" |
| + [(set_attr "type" "call") |
| + (set_attr "length" "2")] |
| + ) |
| + |
| + |
| +(define_peephole |
| + [(set (match_operand 0 "register_operand" "r") |
| + (match_operand 1 "immediate_operand" "i")) |
| + (return)] |
| + "(USE_RETURN_INSN (FALSE) && (REGNO(operands[0]) == RETVAL_REGNUM) && |
| + ((INTVAL(operands[1]) == -1) || (INTVAL(operands[1]) == 0) || (INTVAL(operands[1]) == 1)))" |
| + { |
| + avr32_output_return_instruction (TRUE, FALSE, NULL, operands[1]); |
| + return ""; |
| + } |
| + [(set_attr "type" "call") |
| + (set_attr "length" "4")] |
| + ) |
| + |
| +(define_peephole |
| + [(set (match_operand 0 "register_operand" "r") |
| + (match_operand 1 "immediate_operand" "i")) |
| + (unspec_volatile [(return)] VUNSPEC_EPILOGUE)] |
| + "(REGNO(operands[0]) == RETVAL_REGNUM) && |
| + ((INTVAL(operands[1]) == -1) || (INTVAL(operands[1]) == 0) || (INTVAL(operands[1]) == 1))" |
| + { |
| + avr32_output_return_instruction (FALSE, FALSE, NULL, operands[1]); |
| + return ""; |
| + } |
| + ; Length is absolute worst case |
| + [(set_attr "type" "branch") |
| + (set_attr "length" "12")] |
| + ) |
| + |
| +(define_peephole |
| + [(set (match_operand 0 "register_operand" "=r") |
| + (if_then_else (match_operator 1 "avr32_comparison_operator" |
| + [(match_operand 4 "register_operand" "r") |
| + (match_operand 5 "register_immediate_operand" "rKs21")]) |
| + (match_operand 2 "avr32_cond_register_immediate_operand" "rKs08") |
| + (match_operand 3 "avr32_cond_register_immediate_operand" "rKs08"))) |
| + (return)] |
| + "USE_RETURN_INSN (TRUE) && (REGNO(operands[0]) == RETVAL_REGNUM)" |
| + { |
| + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]); |
| + |
| + if ( GET_CODE(operands[2]) == REG |
| + && GET_CODE(operands[3]) == REG |
| + && REGNO(operands[2]) != LR_REGNUM |
| + && REGNO(operands[3]) != LR_REGNUM ){ |
| + return "ret%1 %2\;ret%i1 %3"; |
| + } else if ( GET_CODE(operands[2]) == REG |
| + && GET_CODE(operands[3]) == CONST_INT ){ |
| + if ( INTVAL(operands[3]) == -1 |
| + || INTVAL(operands[3]) == 0 |
| + || INTVAL(operands[3]) == 1 ){ |
| + return "ret%1 %2\;ret%i1 %d3"; |
| + } else { |
| + return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12"; |
| + } |
| + } else if ( GET_CODE(operands[2]) == CONST_INT |
| + && GET_CODE(operands[3]) == REG ){ |
| + if ( INTVAL(operands[2]) == -1 |
| + || INTVAL(operands[2]) == 0 |
| + || INTVAL(operands[2]) == 1 ){ |
| + return "ret%1 %d2\;ret%i1 %3"; |
| + } else { |
| + return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12"; |
| + } |
| + } else { |
| + if ( (INTVAL(operands[2]) == -1 |
| + || INTVAL(operands[2]) == 0 |
| + || INTVAL(operands[2]) == 1 ) |
| + && (INTVAL(operands[3]) == -1 |
| + || INTVAL(operands[3]) == 0 |
| + || INTVAL(operands[3]) == 1 )){ |
| + return "ret%1 %d2\;ret%i1 %d3"; |
| + } else { |
| + return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12"; |
| + } |
| + } |
| + } |
| + |
| + [(set_attr "length" "10") |
| + (set_attr "cc" "none") |
| + (set_attr "type" "call")]) |
| + |
| + |
| + |
| +;;================================================================= |
| +;; mulnhh.w |
| +;;================================================================= |
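| +;; Fold a half-word negation followed by a widening signed multiply of the |
| +;; negated value into one multiply of the negated operand, provided the |
| +;; temporary is dead or is overwritten by the product. |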
| + |
| +(define_peephole2 |
| + [(set (match_operand:HI 0 "register_operand" "") |
| + (neg:HI (match_operand:HI 1 "register_operand" ""))) |
| + (set (match_operand:SI 2 "register_operand" "") |
| + (mult:SI |
| + (sign_extend:SI (match_dup 0)) |
| + (sign_extend:SI (match_operand:HI 3 "register_operand" ""))))] |
| + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[2]) == REGNO(operands[0]))" |
| + [ (set (match_dup 2) |
| + (mult:SI |
| + (sign_extend:SI (neg:HI (match_dup 1))) |
| + (sign_extend:SI (match_dup 3))))] |
| + "" |
| + ) |
| + |
| +(define_peephole2 |
| + [(set (match_operand:HI 0 "register_operand" "") |
| + (neg:HI (match_operand:HI 1 "register_operand" ""))) |
| + (set (match_operand:SI 2 "register_operand" "") |
| + (mult:SI |
| + (sign_extend:SI (match_operand:HI 3 "register_operand" "")) |
| + (sign_extend:SI (match_dup 0))))] |
| + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[2]) == REGNO(operands[0]))" |
| + [ (set (match_dup 2) |
| + (mult:SI |
| + (sign_extend:SI (neg:HI (match_dup 1))) |
| + (sign_extend:SI (match_dup 3))))] |
| + "" |
| + ) |
| + |
| + |
| + |
| +;;================================================================= |
| +;; Vector set and extract operations |
| +;;================================================================= |
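| +;; AVR32 is big-endian, so element 0 of a V2HI occupies the upper half-word |
| +;; (bits 31:16) of the register and element 0 of a V4QI the uppermost byte; |
| +;; the bfins/bfextu offsets below follow from this layout. |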
| +(define_insn "vec_setv2hi_hi" |
| +  [(set (match_operand:V2HI 0 "register_operand" "+r") |
| + (vec_merge:V2HI |
| + (match_dup 0) |
| + (vec_duplicate:V2HI |
| + (match_operand:HI 1 "register_operand" "r")) |
| + (const_int 1)))] |
| + "" |
| + "bfins\t%0, %1, 16, 16" |
| + [(set_attr "type" "alu") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "clobber")]) |
| + |
| +(define_insn "vec_setv2hi_lo" |
| + [(set (match_operand:V2HI 0 "register_operand" "+r") |
| + (vec_merge:V2HI |
| + (match_dup 0) |
| + (vec_duplicate:V2HI |
| + (match_operand:HI 1 "register_operand" "r")) |
| + (const_int 2)))] |
| + "" |
| + "bfins\t%0, %1, 0, 16" |
| + [(set_attr "type" "alu") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "clobber")]) |
| + |
| +(define_expand "vec_setv2hi" |
| + [(set (match_operand:V2HI 0 "register_operand" "") |
| + (vec_merge:V2HI |
| + (match_dup 0) |
| + (vec_duplicate:V2HI |
| + (match_operand:HI 1 "register_operand" "")) |
| + (match_operand 2 "immediate_operand" "")))] |
| + "" |
| + { operands[2] = GEN_INT(INTVAL(operands[2]) + 1); } |
| + ) |
| + |
| +(define_insn "vec_extractv2hi" |
| + [(set (match_operand:HI 0 "register_operand" "=r") |
| + (vec_select:HI |
| + (match_operand:V2HI 1 "register_operand" "r") |
| + (parallel [(match_operand:SI 2 "immediate_operand" "i")])))] |
| + "" |
| + { |
| + if ( INTVAL(operands[2]) == 0 ) |
| + return "bfextu\t%0, %1, 16, 16"; |
| + else |
| + return "bfextu\t%0, %1, 0, 16"; |
| + } |
| + [(set_attr "type" "alu") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "clobber")]) |
| + |
| +(define_insn "vec_extractv4qi" |
| + [(set (match_operand:QI 0 "register_operand" "=r") |
| + (vec_select:QI |
| + (match_operand:V4QI 1 "register_operand" "r") |
| + (parallel [(match_operand:SI 2 "immediate_operand" "i")])))] |
| + "" |
| + { |
| + switch ( INTVAL(operands[2]) ){ |
| + case 0: |
| + return "bfextu\t%0, %1, 24, 8"; |
| + case 1: |
| + return "bfextu\t%0, %1, 16, 8"; |
| + case 2: |
| + return "bfextu\t%0, %1, 8, 8"; |
| + case 3: |
| + return "bfextu\t%0, %1, 0, 8"; |
| + default: |
| + abort(); |
| + } |
| + } |
| + [(set_attr "type" "alu") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "clobber")]) |
| + |
| + |
| +(define_insn "concatv2hi" |
| + [(set (match_operand:V2HI 0 "register_operand" "=r, r, r") |
| + (vec_concat:V2HI |
| + (match_operand:HI 1 "register_operand" "r, r, 0") |
| + (match_operand:HI 2 "register_operand" "r, 0, r")))] |
| + "" |
| + "@ |
| + mov\t%0, %1\;bfins\t%0, %2, 0, 16 |
| + bfins\t%0, %2, 0, 16 |
| + bfins\t%0, %1, 16, 16" |
| + [(set_attr "length" "6, 4, 4") |
| + (set_attr "type" "alu")]) |
| + |
| + |
| +;; Load the atomic operation description |
| +(include "sync.md") |
| + |
| +;; Load the SIMD description |
| +(include "simd.md") |
| + |
| +;; Load the FP coprocessor patterns |
| +(include "fpcp.md") |
| --- /dev/null |
| +++ b/gcc/config/avr32/avr32-modes.def |
| @@ -0,0 +1 @@ |
| +VECTOR_MODES (INT, 4); /* V4QI V2HI */ |
| --- /dev/null |
| +++ b/gcc/config/avr32/avr32.opt |
| @@ -0,0 +1,86 @@ |
| +; Options for the Atmel AVR32 port of the compiler. |
| + |
| +; Copyright 2007 Atmel Corporation. |
| +; |
| +; This file is part of GCC. |
| +; |
| +; GCC is free software; you can redistribute it and/or modify it under |
| +; the terms of the GNU General Public License as published by the Free |
| +; Software Foundation; either version 2, or (at your option) any later |
| +; version. |
| +; |
| +; GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| +; WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| +; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| +; for more details. |
| +; |
| +; You should have received a copy of the GNU General Public License |
| +; along with GCC; see the file COPYING. If not, write to the Free |
| +; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA |
| +; 02110-1301, USA. |
| + |
| +muse-rodata-section |
| +Target Report Mask(USE_RODATA_SECTION) |
| +Use section .rodata for read-only data instead of .text. |
| + |
| +mhard-float |
| +Target Report Undocumented Mask(HARD_FLOAT) |
| +Use floating-point coprocessor instructions. |
| + |
| +msoft-float |
| +Target Report Undocumented InverseMask(HARD_FLOAT, SOFT_FLOAT) |
| +Use software floating-point library for floating-point operations. |
| + |
| +mforce-double-align |
| +Target Report RejectNegative Mask(FORCE_DOUBLE_ALIGN) |
| +Force double-word alignment for double-word memory accesses. |
| + |
| +mno-init-got |
| +Target Report RejectNegative Mask(NO_INIT_GOT) |
| +Do not initialize the GOT register before using it when compiling PIC code. |
| + |
| +mrelax |
| +Target Report Mask(RELAX) |
| +Let the invoked assembler and linker do relaxing (enabled by default when the optimization level is greater than 1). |
| + |
| +mmd-reorg-opt |
| +Target Report Undocumented Mask(MD_REORG_OPTIMIZATION) |
| +Perform machine dependent optimizations in reorg stage. |
| + |
| +masm-addr-pseudos |
| +Target Report Mask(HAS_ASM_ADDR_PSEUDOS) |
| +Use assembler pseudo-instructions lda.w and call for handling direct addresses. (Enabled by default) |
| + |
| +mpart= |
| +Target Report RejectNegative Joined Var(avr32_part_name) |
| +Specify the AVR32 part name. |
| + |
| +mcpu= |
| +Target Report RejectNegative Joined Undocumented Var(avr32_part_name) |
| +Specify the AVR32 part name (deprecated). |
| + |
| +march= |
| +Target Report RejectNegative Joined Var(avr32_arch_name) |
| +Specify the AVR32 architecture name. |
| + |
| +mfast-float |
| +Target Report Mask(FAST_FLOAT) |
| +Enable fast floating-point library. Enabled by default if the -funsafe-math-optimizations switch is specified. |
| + |
| +mimm-in-const-pool |
| +Target Report Var(avr32_imm_in_const_pool) Init(-1) |
| +Put large immediates in the constant pool. This is enabled by default for architectures with an instruction cache. |
| + |
| +mno-pic |
| +Target Report RejectNegative Mask(NO_PIC) |
| +Do not generate position-independent code. (deprecated, use -fno-pic instead) |
| + |
| +mcond-exec-before-reload |
| +Target Report Undocumented Mask(COND_EXEC_BEFORE_RELOAD) |
| +Enable experimental conditional execution preparation before the reload stage. |
| + |
| +mrmw-addressable-data |
| +Target Report Mask(RMW_ADDRESSABLE_DATA) |
| +Signal that all data is within range of the atomic read-modify-write memory instructions, so that |
| +GCC can safely generate them whenever possible. |
| + |
| --- /dev/null |
| +++ b/gcc/config/avr32/avr32-protos.h |
| @@ -0,0 +1,196 @@ |
| +/* |
| + Prototypes for exported functions defined in avr32.c |
| + Copyright 2003-2006 Atmel Corporation. |
| + |
| + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| + Initial porting by Anders Ådland. |
| + |
| + This file is part of GCC. |
| + |
| + This program is free software; you can redistribute it and/or modify |
| + it under the terms of the GNU General Public License as published by |
| + the Free Software Foundation; either version 2 of the License, or |
| + (at your option) any later version. |
| + |
| + This program is distributed in the hope that it will be useful, |
| + but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + GNU General Public License for more details. |
| + |
| + You should have received a copy of the GNU General Public License |
| + along with this program; if not, write to the Free Software |
| + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ |
| + |
| + |
| +#ifndef AVR32_PROTOS_H |
| +#define AVR32_PROTOS_H |
| + |
| +extern const int swap_reg[]; |
| + |
| +extern int avr32_valid_macmac_bypass (rtx, rtx); |
| +extern int avr32_valid_mulmac_bypass (rtx, rtx); |
| + |
| +extern int avr32_decode_lcomm_symbol_offset (rtx, int *); |
| +extern void avr32_encode_lcomm_symbol_offset (tree, char *, int); |
| + |
| +extern const char *avr32_strip_name_encoding (const char *); |
| + |
| +extern rtx avr32_get_note_reg_equiv (rtx insn); |
| + |
| +extern int avr32_use_return_insn (int iscond); |
| + |
| +extern void avr32_make_reglist16 (int reglist16_vect, char *reglist16_string); |
| + |
| +extern void avr32_make_reglist8 (int reglist8_vect, char *reglist8_string); |
| +extern void avr32_make_fp_reglist_w (int reglist_mask, char *reglist_string); |
| +extern void avr32_make_fp_reglist_d (int reglist_mask, char *reglist_string); |
| + |
| +extern void avr32_output_return_instruction (int single_ret_inst, |
| + int iscond, rtx cond, |
| + rtx r12_imm); |
| +extern void avr32_expand_prologue (void); |
| +extern void avr32_set_return_address (rtx source, rtx scratch); |
| + |
| +extern int avr32_hard_regno_mode_ok (int regno, enum machine_mode mode); |
| +extern int avr32_extra_constraint_s (rtx value, const int strict); |
| +extern int avr32_eh_return_data_regno (const int n); |
| +extern int avr32_initial_elimination_offset (const int from, const int to); |
| +extern rtx avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode, |
| + tree type, int named); |
| +extern void avr32_init_cumulative_args (CUMULATIVE_ARGS * cum, tree fntype, |
| + rtx libname, tree fndecl); |
| +extern void avr32_function_arg_advance (CUMULATIVE_ARGS * cum, |
| + enum machine_mode mode, |
| + tree type, int named); |
| +#ifdef ARGS_SIZE_RTX |
| +/* expr.h defines ARGS_SIZE_RTX and `enum direction'. */ |
| +extern enum direction avr32_function_arg_padding (enum machine_mode mode, |
| + tree type); |
| +#endif /* ARGS_SIZE_RTX */ |
| +extern rtx avr32_function_value (tree valtype, tree func, bool outgoing); |
| +extern rtx avr32_libcall_value (enum machine_mode mode); |
| +extern int avr32_sched_use_dfa_pipeline_interface (void); |
| +extern bool avr32_return_in_memory (tree type, tree fntype); |
| +extern void avr32_regs_to_save (char *operand); |
| +extern void avr32_target_asm_function_prologue (FILE * file, |
| + HOST_WIDE_INT size); |
| +extern void avr32_target_asm_function_epilogue (FILE * file, |
| + HOST_WIDE_INT size); |
| +extern void avr32_trampoline_template (FILE * file); |
| +extern void avr32_initialize_trampoline (rtx addr, rtx fnaddr, |
| + rtx static_chain); |
| +extern int avr32_legitimate_address (enum machine_mode mode, rtx x, |
| + int strict); |
| +extern int avr32_legitimate_constant_p (rtx x); |
| + |
| +extern int avr32_legitimate_pic_operand_p (rtx x); |
| + |
| +extern rtx avr32_find_symbol (rtx x); |
| +extern void avr32_select_section (rtx exp, int reloc, int align); |
| +extern void avr32_encode_section_info (tree decl, rtx rtl, int first); |
| +extern void avr32_asm_file_end (FILE * stream); |
| +extern void avr32_asm_output_ascii (FILE * stream, char *ptr, int len); |
| +extern void avr32_asm_output_common (FILE * stream, const char *name, |
| + int size, int rounded); |
| +extern void avr32_asm_output_label (FILE * stream, const char *name); |
| +extern void avr32_asm_declare_object_name (FILE * stream, char *name, |
| + tree decl); |
| +extern void avr32_asm_globalize_label (FILE * stream, const char *name); |
| +extern void avr32_asm_weaken_label (FILE * stream, const char *name); |
| +extern void avr32_asm_output_external (FILE * stream, tree decl, |
| + const char *name); |
| +extern void avr32_asm_output_external_libcall (FILE * stream, rtx symref); |
| +extern void avr32_asm_output_labelref (FILE * stream, const char *name); |
| +extern void avr32_notice_update_cc (rtx exp, rtx insn); |
| +extern void avr32_print_operand (FILE * stream, rtx x, int code); |
| +extern void avr32_print_operand_address (FILE * stream, rtx x); |
| + |
| +extern int avr32_symbol (rtx x); |
| + |
| +extern void avr32_select_rtx_section (enum machine_mode mode, rtx x, |
| + unsigned HOST_WIDE_INT align); |
| + |
| +extern int avr32_load_multiple_operation (rtx op, enum machine_mode mode); |
| +extern int avr32_store_multiple_operation (rtx op, enum machine_mode mode); |
| + |
| +extern int avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c, |
| + const char *str); |
| + |
| +extern bool avr32_cannot_force_const_mem (rtx x); |
| + |
| +extern void avr32_init_builtins (void); |
| + |
| +extern rtx avr32_expand_builtin (tree exp, rtx target, rtx subtarget, |
| + enum machine_mode mode, int ignore); |
| + |
| +extern bool avr32_must_pass_in_stack (enum machine_mode mode, tree type); |
| + |
| +extern bool avr32_strict_argument_naming (CUMULATIVE_ARGS * ca); |
| + |
| +extern bool avr32_pass_by_reference (CUMULATIVE_ARGS * cum, |
| + enum machine_mode mode, |
| + tree type, bool named); |
| + |
| +extern rtx avr32_gen_load_multiple (rtx * regs, int count, rtx from, |
| + int write_back, int in_struct_p, |
| + int scalar_p); |
| +extern rtx avr32_gen_store_multiple (rtx * regs, int count, rtx to, |
| + int in_struct_p, int scalar_p); |
| +extern int avr32_gen_movmemsi (rtx * operands); |
| + |
| +extern int avr32_rnd_operands (rtx add, rtx shift); |
| +extern int avr32_adjust_insn_length (rtx insn, int length); |
| + |
| +extern int symbol_mentioned_p (rtx x); |
| +extern int label_mentioned_p (rtx x); |
| +extern rtx legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg); |
| +extern int avr32_address_register_rtx_p (rtx x, int strict_p); |
| +extern int avr32_legitimate_index_p (enum machine_mode mode, rtx index, |
| + int strict_p); |
| + |
| +extern int avr32_const_double_immediate (rtx value); |
| +extern void avr32_init_expanders (void); |
| +extern rtx avr32_return_addr (int count, rtx frame); |
| +extern bool avr32_got_mentioned_p (rtx addr); |
| + |
| +extern void avr32_final_prescan_insn (rtx insn, rtx * opvec, int noperands); |
| + |
| +extern int avr32_expand_movcc (enum machine_mode mode, rtx operands[]); |
| +extern int avr32_expand_addcc (enum machine_mode mode, rtx operands[]); |
| +#ifdef RTX_CODE |
| +extern int avr32_expand_scc (RTX_CODE cond, rtx * operands); |
| +#endif |
| + |
| +extern int avr32_store_bypass (rtx insn_out, rtx insn_in); |
| +extern int avr32_mul_waw_bypass (rtx insn_out, rtx insn_in); |
| +extern int avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in); |
| +extern int avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in); |
| +extern rtx avr32_output_cmp (rtx cond, enum machine_mode mode, |
| + rtx op0, rtx op1); |
| + |
| +rtx get_next_insn_cond (rtx cur_insn); |
| +int set_next_insn_cond (rtx cur_insn, rtx cond); |
| +void avr32_override_options (void); |
| +void avr32_load_pic_register (void); |
| +#ifdef GCC_BASIC_BLOCK_H |
| +rtx avr32_ifcvt_modify_insn (ce_if_block_t *ce_info, rtx pattern, rtx insn, |
| + int *num_true_changes); |
| +rtx avr32_ifcvt_modify_test (ce_if_block_t *ce_info, rtx test ); |
| +void avr32_ifcvt_modify_cancel ( ce_if_block_t *ce_info, int *num_true_changes); |
| +#endif |
| +void avr32_optimization_options (int level, int size); |
| +int avr32_const_ok_for_move (HOST_WIDE_INT c); |
| + |
| +void avr32_split_const_expr (enum machine_mode mode, |
| + enum machine_mode new_mode, |
| + rtx expr, |
| + rtx *split_expr); |
| +void avr32_get_intval (enum machine_mode mode, |
| + rtx const_expr, |
| + HOST_WIDE_INT *val); |
| + |
| +int avr32_cond_imm_clobber_splittable (rtx insn, |
| + rtx operands[]); |
| + |
| + |
| +#endif /* AVR32_PROTOS_H */ |
| --- /dev/null |
| +++ b/gcc/config/avr32/crti.asm |
| @@ -0,0 +1,64 @@ |
| +/* |
| + Init/fini stuff for AVR32. |
| + Copyright 2003-2006 Atmel Corporation. |
| + |
| + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| + |
| + This file is part of GCC. |
| + |
| + This program is free software; you can redistribute it and/or modify |
| + it under the terms of the GNU General Public License as published by |
| + the Free Software Foundation; either version 2 of the License, or |
| + (at your option) any later version. |
| + |
| + This program is distributed in the hope that it will be useful, |
| + but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + GNU General Public License for more details. |
| + |
| + You should have received a copy of the GNU General Public License |
| + along with this program; if not, write to the Free Software |
| + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ |
| + |
| + |
| +/* The code in sections .init and .fini is supposed to be a single |
| + regular function. The function in .init is called directly from |
| + start in crt1.asm. The function in .fini is atexit()ed in crt1.asm |
| + too. |
| + |
| + crti.asm contributes the prologue of a function to these sections, |
| + and crtn.asm supplies the epilogue. STARTFILE_SPEC should list |
| + crti.o before any other object files that might add code to .init |
| + or .fini sections, and ENDFILE_SPEC should list crtn.o after any |
| + such object files. */ |
| + |
| + .file "crti.asm" |
| + |
| + .section ".init" |
| +/* Just load the GOT */ |
| + .align 2 |
| + .global _init |
| +_init: |
| + stm --sp, r6, lr |
| + lddpc r6, 1f |
| +0: |
| + rsub r6, pc |
| + rjmp 2f |
| + .align 2 |
| +1: .long 0b - _GLOBAL_OFFSET_TABLE_ |
| +2: |
| + |
| + .section ".fini" |
| +/* Just load the GOT */ |
| + .align 2 |
| + .global _fini |
| +_fini: |
| + stm --sp, r6, lr |
| + lddpc r6, 1f |
| +0: |
| + rsub r6, pc |
| + rjmp 2f |
| + .align 2 |
| +1: .long 0b - _GLOBAL_OFFSET_TABLE_ |
| +2: |
| + |
| --- /dev/null |
| +++ b/gcc/config/avr32/crtn.asm |
| @@ -0,0 +1,44 @@ |
| +/* Copyright (C) 2001 Free Software Foundation, Inc. |
| + Written By Nick Clifton |
| + |
| + This file is free software; you can redistribute it and/or modify it |
| + under the terms of the GNU General Public License as published by the |
| + Free Software Foundation; either version 2, or (at your option) any |
| + later version. |
| + |
| + In addition to the permissions in the GNU General Public License, the |
| + Free Software Foundation gives you unlimited permission to link the |
| + compiled version of this file with other programs, and to distribute |
| + those programs without any restriction coming from the use of this |
| + file. (The General Public License restrictions do apply in other |
| + respects; for example, they cover modification of the file, and |
| + distribution when not linked into another program.) |
| + |
| + This file is distributed in the hope that it will be useful, but |
| + WITHOUT ANY WARRANTY; without even the implied warranty of |
| + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| + General Public License for more details. |
| + |
| + You should have received a copy of the GNU General Public License |
| + along with this program; see the file COPYING. If not, write to |
| + the Free Software Foundation, 59 Temple Place - Suite 330, |
| + Boston, MA 02111-1307, USA. |
| + |
| + As a special exception, if you link this library with files |
| + compiled with GCC to produce an executable, this does not cause |
| + the resulting executable to be covered by the GNU General Public License. |
| + This exception does not however invalidate any other reasons why |
| + the executable file might be covered by the GNU General Public License. |
| +*/ |
| + |
| + |
| + |
| + |
| + .file "crtn.asm" |
| + |
| + .section ".init" |
| + ldm sp++, r6, pc |
| + |
| + .section ".fini" |
| + ldm sp++, r6, pc |
| + |
| --- /dev/null |
| +++ b/gcc/config/avr32/fpcp.md |
| @@ -0,0 +1,551 @@ |
| +;; AVR32 machine description file for Floating-Point instructions. |
| +;; Copyright 2003-2006 Atmel Corporation. |
| +;; |
| +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| +;; |
| +;; This file is part of GCC. |
| +;; |
| +;; This program is free software; you can redistribute it and/or modify |
| +;; it under the terms of the GNU General Public License as published by |
| +;; the Free Software Foundation; either version 2 of the License, or |
| +;; (at your option) any later version. |
| +;; |
| +;; This program is distributed in the hope that it will be useful, |
| +;; but WITHOUT ANY WARRANTY; without even the implied warranty of |
| +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| +;; GNU General Public License for more details. |
| +;; |
| +;; You should have received a copy of the GNU General Public License |
| +;; along with this program; if not, write to the Free Software |
| +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
| + |
| +;; -*- Mode: Scheme -*- |
| + |
| +;;****************************************************************************** |
| +;; Automaton pipeline description for floating-point coprocessor insns |
| +;;****************************************************************************** |
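| +;; The units below model the FP coprocessor pipeline stages; the insn |
| +;; reservations combine them with the issue/decode stages (is, da, d) of |
| +;; the avr32_ap automaton declared elsewhere in the AVR32 pipeline description. |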
| +(define_cpu_unit "fid,fm1,fm2,fm3,fm4,fwb,fcmp,fcast" "avr32_ap") |
| + |
| +(define_insn_reservation "fmv_op" 1 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "fmv")) |
| + "is,da,d,fid,fwb") |
| + |
| +(define_insn_reservation "fmul_op" 5 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "fmul")) |
| + "is,da,d,fid,fm1,fm2,fm3,fm4,fwb") |
| + |
| +(define_insn_reservation "fcmps_op" 1 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "fcmps")) |
| + "is,da,d,fid,fcmp") |
| + |
| +(define_insn_reservation "fcmpd_op" 2 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "fcmpd")) |
| + "is,da,d,fid*2,fcmp") |
| + |
| +(define_insn_reservation "fcast_op" 3 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "fcast")) |
| + "is,da,d,fid,fcmp,fcast,fwb") |
| + |
| +(define_insn_reservation "fmvcpu_op" 2 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "fmvcpu")) |
| + "is,da,d") |
| + |
| +(define_insn_reservation "fldd_op" 1 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "fldd")) |
| + "is,da,d,fwb") |
| + |
| +(define_insn_reservation "flds_op" 1 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "flds")) |
| + "is,da,d,fwb") |
| + |
| +(define_insn_reservation "fsts_op" 0 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "fsts")) |
| + "is,da*2,d") |
| + |
| +(define_insn_reservation "fstd_op" 0 |
| + (and (eq_attr "pipeline" "ap") |
| + (eq_attr "type" "fstd")) |
| + "is,da*2,d") |
| + |
| + |
| +(define_insn "*movsf_fpcp" |
| + [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,r,f,m,r,r,r,m") |
| + (match_operand:SF 1 "general_operand" " f,r,f,m,f,r,G,m,r"))] |
| + "TARGET_HARD_FLOAT" |
| + "@ |
| + fmov.s\t%0, %1 |
| + fmov.s\t%0, %1 |
| + fmov.s\t%0, %1 |
| + fld.s\t%0, %1 |
| + fst.s\t%0, %1 |
| + mov\t%0, %1 |
| + mov\t%0, %1 |
| + ld.w\t%0, %1 |
| + st.w\t%0, %1" |
| + [(set_attr "length" "4,4,4,4,4,2,4,4,4") |
| + (set_attr "type" "fmv,flds,fmvcpu,flds,fsts,alu,alu,load,store")]) |
| + |
| +(define_insn_and_split "*movdf_fpcp" |
| + [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,r,f,m,r,r,m") |
| + (match_operand:DF 1 "general_operand" " f,r,f,m,f,r,m,r"))] |
| + "TARGET_HARD_FLOAT" |
| + "@ |
| + fmov.d\t%0, %1 |
| + fmov.d\t%0, %1 |
| + fmov.d\t%0, %1 |
| + fld.d\t%0, %1 |
| + fst.d\t%0, %1 |
| + mov\t%0, %1\;mov\t%m0, %m1 |
| + ld.d\t%0, %1 |
| + st.d\t%0, %1" |
| + |
| + "TARGET_HARD_FLOAT |
| + && reload_completed |
| + && (REG_P(operands[0]) && (REGNO_REG_CLASS(REGNO(operands[0])) == GENERAL_REGS)) |
| + && (REG_P(operands[1]) && (REGNO_REG_CLASS(REGNO(operands[1])) == GENERAL_REGS))" |
| + [(set (match_dup 0) (match_dup 1)) |
| + (set (match_dup 2) (match_dup 3))] |
| + " |
| + { |
| + operands[2] = gen_highpart (SImode, operands[0]); |
| + operands[0] = gen_lowpart (SImode, operands[0]); |
| + operands[3] = gen_highpart(SImode, operands[1]); |
| + operands[1] = gen_lowpart(SImode, operands[1]); |
| + } |
| + " |
| + |
| + [(set_attr "length" "4,4,4,4,4,4,4,4") |
| + (set_attr "type" "fmv,fldd,fmvcpu,fldd,fstd,alu2,load2,store2")]) |
| + |
| + |
| +(define_insn "mulsf3" |
| + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| + (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f") |
| + (match_operand:SF 2 "avr32_fp_register_operand" "f")))] |
| + "TARGET_HARD_FLOAT" |
| + "fmul.s\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmul")]) |
| + |
| +(define_insn "nmulsf3" |
| + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| + (neg:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f") |
| + (match_operand:SF 2 "avr32_fp_register_operand" "f"))))] |
| + "TARGET_HARD_FLOAT" |
| + "fnmul.s\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmul")]) |
| + |
| +(define_peephole2 |
| + [(set (match_operand:SF 0 "avr32_fp_register_operand" "") |
| + (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "") |
| + (match_operand:SF 2 "avr32_fp_register_operand" ""))) |
| + (set (match_operand:SF 3 "avr32_fp_register_operand" "") |
| + (neg:SF (match_dup 0)))] |
| + "TARGET_HARD_FLOAT && |
| + (peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[3]) == REGNO(operands[0])))" |
| + [(set (match_dup 3) |
| + (neg:SF (mult:SF (match_dup 1) |
| + (match_dup 2))))] |
| +) |
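| + |
| +;; As a rough illustration (hypothetical source, not taken from any test |
| +;; case), the peephole above targets sequences where the product only |
| +;; feeds a negation and then dies: |
| +;; |
| +;;     float t = a * b;   /* mulsf3 -> fmul.s */ |
| +;;     float d = -t;      /* negsf2 -> fneg.s */ |
| +;; |
| +;; Such a pair is rewritten into the single fnmul.s form matched by the |
| +;; nmulsf3 pattern. |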
| + |
| + |
| +(define_insn "macsf3" |
| + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| + (plus:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f") |
| + (match_operand:SF 2 "avr32_fp_register_operand" "f")) |
| + (match_operand:SF 3 "avr32_fp_register_operand" "0")))] |
| + "TARGET_HARD_FLOAT" |
| + "fmac.s\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmul")]) |
| + |
| +(define_insn "nmacsf3" |
| + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| + (plus:SF (neg:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f") |
| + (match_operand:SF 2 "avr32_fp_register_operand" "f"))) |
| + (match_operand:SF 3 "avr32_fp_register_operand" "0")))] |
| + "TARGET_HARD_FLOAT" |
| + "fnmac.s\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmul")]) |
| + |
| +(define_peephole2 |
| + [(set (match_operand:SF 0 "avr32_fp_register_operand" "") |
| + (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "") |
| + (match_operand:SF 2 "avr32_fp_register_operand" ""))) |
| + (set (match_operand:SF 3 "avr32_fp_register_operand" "") |
| + (minus:SF |
| + (match_dup 3) |
| + (match_dup 0)))] |
| + "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])" |
| + [(set (match_dup 3) |
| + (plus:SF (neg:SF (mult:SF (match_dup 1) |
| + (match_dup 2))) |
| + (match_dup 3)))] |
| +) |
| + |
| + |
| +(define_insn "msubacsf3" |
| + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| + (minus:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f") |
| + (match_operand:SF 2 "avr32_fp_register_operand" "f")) |
| + (match_operand:SF 3 "avr32_fp_register_operand" "0")))] |
| + "TARGET_HARD_FLOAT" |
| + "fmsc.s\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmul")]) |
| + |
| +(define_peephole2 |
| + [(set (match_operand:SF 0 "avr32_fp_register_operand" "") |
| + (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "") |
| + (match_operand:SF 2 "avr32_fp_register_operand" ""))) |
| + (set (match_operand:SF 3 "avr32_fp_register_operand" "") |
| + (minus:SF |
| + (match_dup 0) |
| + (match_dup 3)))] |
| + "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])" |
| + [(set (match_dup 3) |
| + (minus:SF (mult:SF (match_dup 1) |
| + (match_dup 2)) |
| + (match_dup 3)))] |
| +) |
| + |
| +(define_insn "nmsubacsf3" |
| + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| + (minus:SF (neg:SF (mult:SF (match_operand:SF 1 "avr32_fp_register_operand" "f") |
| + (match_operand:SF 2 "avr32_fp_register_operand" "f"))) |
| + (match_operand:SF 3 "avr32_fp_register_operand" "0")))] |
| + "TARGET_HARD_FLOAT" |
| + "fnmsc.s\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmul")]) |
| + |
| + |
| + |
| +(define_insn "addsf3" |
| + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| + (plus:SF (match_operand:SF 1 "avr32_fp_register_operand" "f") |
| + (match_operand:SF 2 "avr32_fp_register_operand" "f")))] |
| + "TARGET_HARD_FLOAT" |
| + "fadd.s\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmul")]) |
| + |
| +(define_insn "subsf3" |
| + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| + (minus:SF (match_operand:SF 1 "avr32_fp_register_operand" "f") |
| + (match_operand:SF 2 "avr32_fp_register_operand" "f")))] |
| + "TARGET_HARD_FLOAT" |
| + "fsub.s\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmul")]) |
| + |
| + |
| +(define_insn "negsf2" |
| + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| + (neg:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")))] |
| + "TARGET_HARD_FLOAT" |
| + "fneg.s\t%0, %1" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmv")]) |
| + |
| +(define_insn "abssf2" |
| + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| + (abs:SF (match_operand:SF 1 "avr32_fp_register_operand" "f")))] |
| + "TARGET_HARD_FLOAT" |
| + "fabs.s\t%0, %1" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmv")]) |
| + |
| +(define_insn "truncdfsf2" |
| + [(set (match_operand:SF 0 "avr32_fp_register_operand" "=f") |
| + (float_truncate:SF |
| + (match_operand:DF 1 "avr32_fp_register_operand" "f")))] |
| + "TARGET_HARD_FLOAT" |
| + "fcastd.s\t%0, %1" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fcast")]) |
| + |
| +(define_insn "extendsfdf2" |
| + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| + (float_extend:DF |
| + (match_operand:SF 1 "avr32_fp_register_operand" "f")))] |
| + "TARGET_HARD_FLOAT" |
| + "fcasts.d\t%0, %1" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fcast")]) |
| + |
| +(define_insn "muldf3" |
| + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| + (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f") |
| + (match_operand:DF 2 "avr32_fp_register_operand" "f")))] |
| + "TARGET_HARD_FLOAT" |
| + "fmul.d\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmul")]) |
| + |
| +(define_insn "nmuldf3" |
| + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| + (neg:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f") |
| + (match_operand:DF 2 "avr32_fp_register_operand" "f"))))] |
| + "TARGET_HARD_FLOAT" |
| + "fnmul.d\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmul")]) |
| + |
| +(define_peephole2 |
| + [(set (match_operand:DF 0 "avr32_fp_register_operand" "") |
| + (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "") |
| + (match_operand:DF 2 "avr32_fp_register_operand" ""))) |
| + (set (match_operand:DF 3 "avr32_fp_register_operand" "") |
| + (neg:DF (match_dup 0)))] |
| + "TARGET_HARD_FLOAT && |
| + (peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[3]) == REGNO(operands[0])))" |
| + [(set (match_dup 3) |
| + (neg:DF (mult:DF (match_dup 1) |
| + (match_dup 2))))] |
| +) |
| + |
| +(define_insn "macdf3" |
| + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| + (plus:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f") |
| + (match_operand:DF 2 "avr32_fp_register_operand" "f")) |
| + (match_operand:DF 3 "avr32_fp_register_operand" "0")))] |
| + "TARGET_HARD_FLOAT" |
| + "fmac.d\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmul")]) |
| + |
| +(define_insn "msubacdf3" |
| + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| + (minus:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f") |
| + (match_operand:DF 2 "avr32_fp_register_operand" "f")) |
| + (match_operand:DF 3 "avr32_fp_register_operand" "0")))] |
| + "TARGET_HARD_FLOAT" |
| + "fmsc.d\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmul")]) |
| + |
| +(define_peephole2 |
| + [(set (match_operand:DF 0 "avr32_fp_register_operand" "") |
| + (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "") |
| + (match_operand:DF 2 "avr32_fp_register_operand" ""))) |
| + (set (match_operand:DF 3 "avr32_fp_register_operand" "") |
| + (minus:DF |
| + (match_dup 0) |
| + (match_dup 3)))] |
| + "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])" |
| + [(set (match_dup 3) |
| + (minus:DF (mult:DF (match_dup 1) |
| + (match_dup 2)) |
| + (match_dup 3)))] |
| + ) |
| + |
| +(define_insn "nmsubacdf3" |
| + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| + (minus:DF (neg:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f") |
| + (match_operand:DF 2 "avr32_fp_register_operand" "f"))) |
| + (match_operand:DF 3 "avr32_fp_register_operand" "0")))] |
| + "TARGET_HARD_FLOAT" |
| + "fnmsc.d\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmul")]) |
| + |
| +(define_insn "nmacdf3" |
| + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| + (plus:DF (neg:DF (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "f") |
| + (match_operand:DF 2 "avr32_fp_register_operand" "f"))) |
| + (match_operand:DF 3 "avr32_fp_register_operand" "0")))] |
| + "TARGET_HARD_FLOAT" |
| + "fnmac.d\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmul")]) |
| + |
| +(define_peephole2 |
| + [(set (match_operand:DF 0 "avr32_fp_register_operand" "") |
| + (mult:DF (match_operand:DF 1 "avr32_fp_register_operand" "") |
| + (match_operand:DF 2 "avr32_fp_register_operand" ""))) |
| + (set (match_operand:DF 3 "avr32_fp_register_operand" "") |
| + (minus:DF |
| + (match_dup 3) |
| + (match_dup 0)))] |
| + "TARGET_HARD_FLOAT && peep2_reg_dead_p(2, operands[0])" |
| + [(set (match_dup 3) |
| + (plus:DF (neg:DF (mult:DF (match_dup 1) |
| + (match_dup 2))) |
| + (match_dup 3)))] |
| +) |
| + |
| +(define_insn "adddf3" |
| + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| + (plus:DF (match_operand:DF 1 "avr32_fp_register_operand" "f") |
| + (match_operand:DF 2 "avr32_fp_register_operand" "f")))] |
| + "TARGET_HARD_FLOAT" |
| + "fadd.d\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmul")]) |
| + |
| +(define_insn "subdf3" |
| + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| + (minus:DF (match_operand:DF 1 "avr32_fp_register_operand" "f") |
| + (match_operand:DF 2 "avr32_fp_register_operand" "f")))] |
| + "TARGET_HARD_FLOAT" |
| + "fsub.d\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmul")]) |
| + |
| +(define_insn "negdf2" |
| + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| + (neg:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")))] |
| + "TARGET_HARD_FLOAT" |
| + "fneg.d\t%0, %1" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmv")]) |
| + |
| +(define_insn "absdf2" |
| + [(set (match_operand:DF 0 "avr32_fp_register_operand" "=f") |
| + (abs:DF (match_operand:DF 1 "avr32_fp_register_operand" "f")))] |
| + "TARGET_HARD_FLOAT" |
| + "fabs.d\t%0, %1" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmv")]) |
| + |
| + |
| +(define_expand "cmpdf" |
| + [(set (cc0) |
| + (compare:DF |
| + (match_operand:DF 0 "general_operand" "") |
| + (match_operand:DF 1 "general_operand" "")))] |
| + "TARGET_HARD_FLOAT" |
| + "{ |
| + rtx tmpreg; |
| + if ( !REG_P(operands[0]) ) |
| + operands[0] = force_reg(DFmode, operands[0]); |
| + |
| + if ( !REG_P(operands[1]) ) |
| + operands[1] = force_reg(DFmode, operands[1]); |
| + |
| + avr32_compare_op0 = operands[0]; |
| + avr32_compare_op1 = operands[1]; |
| + |
| + emit_insn(gen_cmpdf_internal(operands[0], operands[1])); |
| + |
| + tmpreg = gen_reg_rtx(SImode); |
| + emit_insn(gen_fpcc_to_reg(tmpreg)); |
| + emit_insn(gen_reg_to_cc(tmpreg)); |
| + |
| + DONE; |
| + }" |
| +) |
| + |
| +(define_insn "cmpdf_internal" |
| + [(set (reg:CC FPCC_REGNUM) |
| + (compare:CC |
| + (match_operand:DF 0 "avr32_fp_register_operand" "f") |
| + (match_operand:DF 1 "avr32_fp_register_operand" "f")))] |
| + "TARGET_HARD_FLOAT" |
| + { |
| + if (!rtx_equal_p(cc_prev_status.mdep.fpvalue, SET_SRC(PATTERN (insn))) ) |
| + return "fcmp.d\t%0, %1"; |
| + return ""; |
| + } |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fcmpd") |
| + (set_attr "cc" "fpcompare")]) |
| + |
| +(define_expand "cmpsf" |
| + [(set (cc0) |
| + (compare:SF |
| + (match_operand:SF 0 "general_operand" "") |
| + (match_operand:SF 1 "general_operand" "")))] |
| + "TARGET_HARD_FLOAT" |
| + "{ |
| + rtx tmpreg; |
| + if ( !REG_P(operands[0]) ) |
| + operands[0] = force_reg(SFmode, operands[0]); |
| + |
| + if ( !REG_P(operands[1]) ) |
| + operands[1] = force_reg(SFmode, operands[1]); |
| + |
| + avr32_compare_op0 = operands[0]; |
| + avr32_compare_op1 = operands[1]; |
| + |
| + emit_insn(gen_cmpsf_internal(operands[0], operands[1])); |
| + |
| + tmpreg = gen_reg_rtx(SImode); |
| + emit_insn(gen_fpcc_to_reg(tmpreg)); |
| + emit_insn(gen_reg_to_cc(tmpreg)); |
| + |
| + DONE; |
| + }" |
| +) |
| + |
| +(define_insn "cmpsf_internal" |
| + [(set (reg:CC FPCC_REGNUM) |
| + (compare:CC |
| + (match_operand:SF 0 "avr32_fp_register_operand" "f") |
| + (match_operand:SF 1 "avr32_fp_register_operand" "f")))] |
| + "TARGET_HARD_FLOAT" |
| + { |
| + if (!rtx_equal_p(cc_prev_status.mdep.fpvalue, SET_SRC(PATTERN (insn))) ) |
| + return "fcmp.s\t%0, %1"; |
| + return ""; |
| + } |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fcmps") |
| + (set_attr "cc" "fpcompare")]) |
| + |
| +(define_insn "fpcc_to_reg" |
| + [(set (match_operand:SI 0 "register_operand" "=r") |
| + (unspec:SI [(reg:CC FPCC_REGNUM)] |
| + UNSPEC_FPCC_TO_REG))] |
| + "TARGET_HARD_FLOAT" |
| + "fmov.s\t%0, fsr" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "fmvcpu")]) |
| + |
| +(define_insn "reg_to_cc" |
| + [(set (cc0) |
| + (unspec:SI [(match_operand:SI 0 "register_operand" "r")] |
| + UNSPEC_REG_TO_CC))] |
| + "TARGET_HARD_FLOAT" |
| + "musfr\t%0" |
| + [(set_attr "length" "2") |
| + (set_attr "type" "alu") |
| + (set_attr "cc" "from_fpcc")]) |
| + |
| +(define_insn "stm_fp" |
| + [(unspec [(match_operand 0 "register_operand" "r") |
| + (match_operand 1 "const_int_operand" "") |
| + (match_operand 2 "const_int_operand" "")] |
| + UNSPEC_STMFP)] |
| + "TARGET_HARD_FLOAT" |
| + { |
| + int cop_reglist = INTVAL(operands[1]); |
| + |
| +    if ( cop_reglist & ~0xff ){ |
| +      operands[1] = GEN_INT(cop_reglist & ~0xff); |
| +      if (INTVAL(operands[2]) != 0) |
| +        return "stcm.d\tcp0, --%0, %D1"; |
| +      else |
| +        return "stcm.d\tcp0, %0, %D1"; |
| +    } |
| + |
| +    if (INTVAL(operands[2]) != 0) |
| +      return "stcm.w\tcp0, --%0, %C1"; |
| +    else |
| +      return "stcm.w\tcp0, %0, %C1"; |
| + } |
| + [(set_attr "type" "fstm") |
| + (set_attr "length" "4") |
| + (set_attr "cc" "none")]) |
| --- /dev/null |
| +++ b/gcc/config/avr32/lib1funcs.S |
| @@ -0,0 +1,2874 @@ |
| +/* Macro for moving immediate value to register. */ |
| +.macro mov_imm reg, imm |
| +.if (((\imm & 0xfffff) == \imm) || ((\imm | 0xfff00000) == \imm)) |
| + mov \reg, \imm |
| +#if __AVR32_UC__ >= 2 |
| +.elseif ((\imm & 0xffff) == 0) |
| + movh \reg, hi(\imm) |
| + |
| +#endif |
| +.else |
| + mov \reg, lo(\imm) |
| + orh \reg, hi(\imm) |
| +.endif |
| +.endm |
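| + |
| +/* A rough C restatement of the .if condition above (fits_mov_imm is a |
| +   hypothetical helper used only for illustration): it tests whether the |
| +   value fits what appears to be the signed 21-bit immediate range of a |
| +   single mov. */ |
| +#if 0 /* illustrative sketch, never assembled */ |
| +#include <stdint.h> |
| + |
| +static int |
| +fits_mov_imm (uint32_t imm) |
| +{ |
| +  return (imm & 0xfffff) == imm          /* 0x00000000 .. 0x000fffff */ |
| +      || (imm | 0xfff00000u) == imm;     /* 0xfff00000 .. 0xffffffff */ |
| +} |
| +#endif |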
| + |
| + |
| + |
| +/* Adjust the unpacked double number if it is a subnormal number. |
| + The exponent and mantissa pair are stored |
| + in [mant_hi,mant_lo] and [exp]. A register with the correct sign bit in |
| + the MSB is passed in [sign]. Needs two scratch |
| + registers [scratch1] and [scratch2]. An adjusted and packed double float |
| + is present in [mant_hi,mant_lo] after the macro has executed. */ |
| +.macro adjust_subnormal_df exp, mant_lo, mant_hi, sign, scratch1, scratch2 |
| + /* We have an exponent which is <=0 indicating a subnormal number |
| + As it should be stored as if the exponent was 1 (although the |
| + exponent field is all zeros to indicate a subnormal number) |
| + we have to shift down the mantissa to its correct position. */ |
| + neg \exp |
| + sub \exp,-1 /* amount to shift down */ |
| + cp.w \exp,54 |
| + brlo 50f /* if more than 53 shift steps, the |
| + entire mantissa will disappear |
| + without any rounding occurring */ |
| + mov \mant_hi, 0 |
| + mov \mant_lo, 0 |
| + rjmp 52f |
| +50: |
| + sub \exp,-10 /* do the shift to position the |
| + mantissa at the same time |
| + note! this does not include the |
| + final 1 step shift to add the sign */ |
| + |
| + /* when shifting, save all shifted out bits in [scratch2]. we may need to |
| + look at them to round correctly. */ |
| + |
| + rsub \scratch1,\exp,32 /* get inverted shift count */ |
| + cp.w \exp,32 /* handle shifts >= 32 separately */ |
| + brhs 51f |
| + |
| + /* small (<32) shift amount, both words are part of the shift */ |
| + lsl \scratch2,\mant_lo,\scratch1 /* save bits to shift out from lsw*/ |
| + lsl \scratch1,\mant_hi,\scratch1 /* get bits from msw destined for lsw*/ |
| + lsr \mant_lo,\mant_lo,\exp /* shift down lsw */ |
| + lsr \mant_hi,\mant_hi,\exp /* shift down msw */ |
| + or \mant_lo,\scratch1 /* combine bits from msw with prepared lsw */ |
| + rjmp 50f |
| + |
| + /* large (>=32) shift amount, only lsw will have bits left after shift. |
| + note that shift operations will use ((shift count) mod 32) so |
| + we do not need to subtract 32 from shift count. */ |
| +51: |
| + lsl \scratch2,\mant_hi,\scratch1 /* save bits to shift out from msw */ |
| + or \scratch2,\mant_lo /* also save all bits from lsw */ |
| + mov \mant_lo,\mant_hi /* msw -> lsw (i.e. "shift 32 first") */ |
| + mov \mant_hi,0 /* clear msw */ |
| + lsr \mant_lo,\mant_lo,\exp /* make rest of shift inside lsw */ |
| + |
| +50: |
| + /* result is almost ready to return, except that least significant bit |
| + and the part we already shifted out may cause the result to be |
| + rounded */ |
| + bld \mant_lo,0 /* get bit to be shifted out */ |
| + brcc 51f /* if bit was 0, no rounding */ |
| + |
| + /* msb of part to remove is 1, so rounding depends on rest of bits */ |
| + tst \scratch2,\scratch2 /* get shifted out tail */ |
| + brne 50f /* if rest > 0, do round */ |
| + bld \mant_lo,1 /* we have to look at lsb in result */ |
| + brcc 51f /* if lsb is 0, don't round */ |
| + |
| +50: |
| + /* subnormal result requires rounding |
| + rounding may cause subnormal to become smallest normal number |
| + luckily, smallest normal number has exactly the representation |
| + we got by rippling a one bit up from mantissa into exponent field. */ |
| + sub \mant_lo,-1 |
| + subcc \mant_hi,-1 |
| + |
| +51: |
| + /* shift and return packed double with correct sign */ |
| + rol \sign |
| + ror \mant_hi |
| + ror \mant_lo |
| +52: |
| +.endm |
| + |
| + |
| +/* Adjust subnormal single float number with exponent [exp] |
| + and mantissa [mant] and round. */ |
| +.macro adjust_subnormal_sf sf, exp, mant, sign, scratch |
| + /* subnormal number */ |
| + rsub \exp,\exp, 1 /* shift amount */ |
| + cp.w \exp, 25 |
| + movhs \mant, 0 |
| + brhs 90f /* Return zero */ |
| + rsub \scratch, \exp, 32 |
| + lsl \scratch, \mant, \scratch /* Check if there are any bits set |
| + in the bits discarded in the mantissa */ |
| + srne \scratch /* If so set the lsb of the shifted mantissa */ |
| + lsr \mant,\mant,\exp /* Shift the mantissa */ |
| + or \mant, \scratch /* Round lsb if any bits were shifted out */ |
| + /* Rounding: For an explanation, see round_sf. */ |
| + mov \scratch, 0x7f /* Set rounding constant */ |
| + bld \mant, 8 |
| + subeq \scratch, -1 /* For odd numbers use rounding constant 0x80 */ |
| + add \mant, \scratch /* Add rounding constant to mantissa */ |
| + /* We can't overflow because the mantissa is shifted at least one position |
| + to the right, so the implicit bit is zero. We can, however, get the implicit |
| + bit set after rounding, which means that we have the lowest normal number, |
| + but this is ok since this bit has the same position as the LSB of the |
| + exponent */ |
| + lsr \sf, \mant, 7 |
| + /* Rotate in sign */ |
| + lsl \sign, 1 |
| + ror \sf |
| +90: |
| +.endm |
| + |
| + |
| +/* Round the unpacked df number with exponent [exp] and |
| + mantissa [mant_hi, mant_lo]. Uses scratch register |
| + [scratch] */ |
| +.macro round_df exp, mant_lo, mant_hi, scratch |
| + mov \scratch, 0x3ff /* Rounding constant */ |
| + bld \mant_lo,11 /* Check if lsb in the final result is |
| + set */ |
| + subeq \scratch, -1 /* Adjust rounding constant to 0x400 |
| + if rounding 0.5 upwards */ |
| + add \mant_lo, \scratch /* Round */ |
| + acr \mant_hi /* If overflowing we know that |
| + we have all zeros in the bits not |
| + scaled out so we can leave them |
| + but we must increase the exponent by |
| + two since we had an implicit bit |
| + which is lost + the extra overflow bit */ |
| + subcs \exp, -2 /* Update exponent */ |
| +.endm |
| + |
| +/* Round single float number stored in [mant] and [exp] */ |
| +.macro round_sf exp, mant, scratch |
| + /* Round: |
| + For 0.5 we round to nearest even integer |
| + for all other cases we round to nearest integer. |
| + This means that if the digit left of the "point" (.) |
| + is 1 we can add 0x80 to the mantissa since the |
| + corner case 0x180 will round up to 0x200. If the |
| + digit left of the "point" is 0 we will have to |
| + add 0x7f since this will give 0xff and hence a |
| + truncation/rounding downwards for the corner |
| + case when the 9 lowest bits are 0x080 */ |
| + mov \scratch, 0x7f /* Set rounding constant */ |
| + /* Check if the mantissa is even or odd */ |
| + bld \mant, 8 |
| + subeq \scratch, -1 /* Rounding constant should be 0x80 */ |
| + add \mant, \scratch |
| + subcs \exp, -2 /* Adjust exponent if we overflowed */ |
| +.endm |
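| + |
| +/* A minimal C sketch of the rounding-constant trick described above |
| +   (round_sf_sketch is a hypothetical helper for illustration; it assumes |
| +   the guard bits sit in bits 7..0 and the result lsb in bit 8, as in the |
| +   unpacked format used here). */ |
| +#if 0 /* illustrative sketch, never assembled */ |
| +#include <stdint.h> |
| + |
| +static uint32_t |
| +round_sf_sketch (uint32_t mant) |
| +{ |
| +  /* Even result lsb: add 0x7f so the tie ...080 truncates downwards. |
| +     Odd result lsb: add 0x80 so the tie ...180 carries upwards.      */ |
| +  uint32_t k = (mant & 0x100) ? 0x80 : 0x7f; |
| +  return mant + k;   /* the caller bumps the exponent on carry out */ |
| +} |
| +#endif |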
| + |
| + |
| + |
| +/* Pack a single float number stored in [mant] and [exp] |
| + into a single float number in [sf] */ |
| +.macro pack_sf sf, exp, mant |
| + bld \mant,31 /* implicit bit to z */ |
| + subne \exp,1 /* if subnormal (implicit bit 0) |
| + adjust exponent to storage format */ |
| + |
| + lsr \sf, \mant, 7 |
| + bfins \sf, \exp, 24, 8 |
| +.endm |
| + |
| +/* Pack exponent [exp] and mantissa [mant_hi, mant_lo] |
| + into [df_hi, df_lo]. [df_hi] is shifted |
| + one bit up so the sign bit can be shifted into it */ |
| + |
| +.macro pack_df exp, mant_lo, mant_hi, df_lo, df_hi |
| + bld \mant_hi,31 /* implicit bit to z */ |
| + subne \exp,1 /* if subnormal (implicit bit 0) |
| + adjust exponent to storage format */ |
| + |
| + lsr \mant_lo,11 /* shift back lsw */ |
| + or \df_lo,\mant_lo,\mant_hi<<21 /* combine with low bits from msw */ |
| + lsl \mant_hi,1 /* get rid of implicit bit */ |
| + lsr \mant_hi,11 /* shift back msw except for one step*/ |
| + or \df_hi,\mant_hi,\exp<<21 /* combine msw with exponent */ |
| +.endm |
| + |
| +/* Normalize single float number stored in [mant] and [exp] |
| + using scratch register [scratch] */ |
| +.macro normalize_sf exp, mant, scratch |
| + /* Adjust exponent and mantissa */ |
| + clz \scratch, \mant |
| + sub \exp, \scratch |
| + lsl \mant, \mant, \scratch |
| +.endm |
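| + |
| +/* The same normalization step as a C sketch (normalize_sf_sketch is a |
| +   hypothetical helper; a zero mantissa must be handled by the caller, |
| +   just as in the macro). */ |
| +#if 0 /* illustrative sketch, never assembled */ |
| +#include <stdint.h> |
| + |
| +static void |
| +normalize_sf_sketch (int *exp, uint32_t *mant) |
| +{ |
| +  int n = __builtin_clz (*mant);  /* number of leading zero bits */ |
| +  *exp -= n;                      /* compensate the exponent ... */ |
| +  *mant <<= n;                    /* ... and left-align the mantissa */ |
| +} |
| +#endif |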
| + |
| +/* Normalize the exponent and mantissa pair stored |
| + in [mant_hi,mant_lo] and [exp]. Needs two scratch |
| + registers [scratch1] and [scratch2]. */ |
| +.macro normalize_df exp, mant_lo, mant_hi, scratch1, scratch2 |
| + clz \scratch1,\mant_hi /* Check if we have zeros in high bits */ |
| + breq 80f /* No need for scaling if no zeros in high bits */ |
| + brcs 81f /* Check for all zeros */ |
| + |
| + /* shift amount is smaller than 32, and involves both msw and lsw*/ |
| + rsub \scratch2,\scratch1,32 /* shift mantissa */ |
| + lsl \mant_hi,\mant_hi,\scratch1 |
| + lsr \scratch2,\mant_lo,\scratch2 |
| + or \mant_hi,\scratch2 |
| + lsl \mant_lo,\mant_lo,\scratch1 |
| + sub \exp,\scratch1 /* adjust exponent */ |
| + rjmp 80f /* Finished */ |
| +81: |
| + /* shift amount is greater than 32 */ |
| + clz \scratch1,\mant_lo /* shift mantissa */ |
| + movcs \scratch1, 0 |
| + subcc \scratch1,-32 |
| + lsl \mant_hi,\mant_lo,\scratch1 |
| + mov \mant_lo,0 |
| + sub \exp,\scratch1 /* adjust exponent */ |
| +80: |
| +.endm |
| + |
| + |
| +/* Fast but approximate multiply of two 64-bit numbers to give a 64 bit result. |
| + The multiplication of [al]x[bl] is discarded. |
| + Operands in [ah], [al], [bh], [bl]. |
| + Scratch registers in [sh], [sl]. |
| + Returns results in registers [rh], [rl].*/ |
| +.macro mul_approx_df ah, al, bh, bl, rh, rl, sh, sl |
| + mulu.d \sl, \ah, \bl |
| + macu.d \sl, \al, \bh |
| + mulu.d \rl, \ah, \bh |
| + add \rl, \sh |
| + acr \rh |
| +.endm |
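| + |
| +/* What the macro computes, as a C sketch (mul_approx_sketch is a |
| +   hypothetical helper): an approximation of the upper 64 bits of the |
| +   128-bit product, with the al*bl term and the low 32 bits of the cross |
| +   terms discarded. */ |
| +#if 0 /* illustrative sketch, never assembled */ |
| +#include <stdint.h> |
| + |
| +static uint64_t |
| +mul_approx_sketch (uint64_t a, uint64_t b) |
| +{ |
| +  uint32_t ah = a >> 32, al = (uint32_t) a; |
| +  uint32_t bh = b >> 32, bl = (uint32_t) b; |
| + |
| +  uint64_t mid  = (uint64_t) ah * bl + (uint64_t) al * bh; /* [sh:sl] */ |
| +  uint64_t high = (uint64_t) ah * bh;                      /* [rh:rl] */ |
| + |
| +  return high + (mid >> 32);   /* al*bl is never computed */ |
| +} |
| +#endif |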
| + |
| + |
| + |
| +#if defined(L_avr32_f64_mul) || defined(L_avr32_f64_mul_fast) |
| + .align 2 |
| +#if defined(L_avr32_f64_mul) |
| + .global __avr32_f64_mul |
| + .type __avr32_f64_mul,@function |
| +__avr32_f64_mul: |
| +#else |
| + .global __avr32_f64_mul_fast |
| + .type __avr32_f64_mul_fast,@function |
| +__avr32_f64_mul_fast: |
| +#endif |
| + or r12, r10, r11 << 1 |
| + breq __avr32_f64_mul_op1_zero |
| + |
| +#if defined(L_avr32_f64_mul) |
| + pushm r4-r7, lr |
| +#else |
| + stm --sp, r5,r6,r7,lr |
| +#endif |
| + |
| +#define AVR32_F64_MUL_OP1_INT_BITS 1 |
| +#define AVR32_F64_MUL_OP2_INT_BITS 10 |
| +#define AVR32_F64_MUL_RES_INT_BITS 11 |
| + |
| + /* op1 in {r11,r10}*/ |
| + /* op2 in {r9,r8}*/ |
| + eor lr, r11, r9 /* MSB(lr) = Sign(op1) ^ Sign(op2) */ |
| + |
| + /* Unpack op1 to 1.63 format*/ |
| + /* exp: r7 */ |
| + /* sf: r11, r10 */ |
| + bfextu r7, r11, 20, 11 /* Extract exponent */ |
| + |
| + mov r5, 1 |
| + |
| + /* Check if normalization is needed */ |
| + breq __avr32_f64_mul_op1_subnormal /*If number is subnormal, normalize it */ |
| + |
| + lsl r11, (12-AVR32_F64_MUL_OP1_INT_BITS-1) /* Extract mantissa, leave room for implicit bit */ |
| + or r11, r11, r10>>(32-(12-AVR32_F64_MUL_OP1_INT_BITS-1)) |
| + lsl r10, (12-AVR32_F64_MUL_OP1_INT_BITS-1) |
| + bfins r11, r5, 32 - (1 + AVR32_F64_MUL_OP1_INT_BITS), 1 + AVR32_F64_MUL_OP1_INT_BITS /* Insert implicit bit */ |
| + |
| + |
| +22: |
| + /* Unpack op2 to 10.54 format */ |
| + /* exp: r6 */ |
| + /* sf: r9, r8 */ |
| + bfextu r6, r9, 20, 11 /* Extract exponent */ |
| + |
| + /* Check if normalization is needed */ |
| + breq __avr32_f64_mul_op2_subnormal /*If number is subnormal, normalize it */ |
| + |
| + lsl r8, 1 /* Extract mantissa, leave room for implicit bit */ |
| + rol r9 |
| + bfins r9, r5, 32 - (1 + AVR32_F64_MUL_OP2_INT_BITS), 1 + AVR32_F64_MUL_OP2_INT_BITS /* Insert implicit bit */ |
| + |
| +23: |
| + |
| + /* Check if any operands are NaN or INF */ |
| + cp r7, 0x7ff |
| + breq __avr32_f64_mul_op_nan_or_inf /* Check op1 for NaN or Inf */ |
| + cp r6, 0x7ff |
| + breq __avr32_f64_mul_op_nan_or_inf /* Check op2 for NaN or Inf */ |
| + |
| + |
| + /* Calculate new exponent in r12*/ |
| + add r12, r7, r6 |
| + sub r12, (1023-1) |
| + |
| +#if defined(L_avr32_f64_mul) |
| + /* Do the multiplication. |
| + Place result in [r11, r10, r7, r6]. The result is in 11.117 format. */ |
| + mulu.d r4, r11, r8 |
| + macu.d r4, r10, r9 |
| + mulu.d r6, r10, r8 |
| + mulu.d r10, r11, r9 |
| + add r7, r4 |
| + adc r10, r10, r5 |
| + acr r11 |
| +#else |
| + /* Do the multiplication using approximate calculation. discard the al x bl |
| + calculation. |
| + Place result in [r11, r10, r7]. The result is in 11.85 format. */ |
| + |
| + /* Do the multiplication using approximate calculation. |
| + Place result in r11, r10. Use r7, r6 as scratch registers */ |
| + mulu.d r6, r11, r8 |
| + macu.d r6, r10, r9 |
| + mulu.d r10, r11, r9 |
| + add r10, r7 |
| + acr r11 |
| +#endif |
| + /* Adjust exponent and mantissa */ |
| + /* [r12]:exp, [r11, r10]:mant [r7, r6]:sticky bits */ |
| + /* Mantissa may be of the format 00000000000.0xxx or 00000000000.1xxx. */ |
| + /* In the first case, shift one pos to left.*/ |
| + bld r11, 32-AVR32_F64_MUL_RES_INT_BITS-1 |
| + breq 0f |
| + lsl r7, 1 |
| + rol r10 |
| + rol r11 |
| + sub r12, 1 |
| +0: |
| + cp r12, 0 |
| + brle __avr32_f64_mul_res_subnormal /*Result was subnormal.*/ |
| + |
| + /* Check for Inf. */ |
| + cp.w r12, 0x7ff |
| + brge __avr32_f64_mul_res_inf |
| + |
| + /* Insert exponent. */ |
| + bfins r11, r12, 20, 11 |
| + |
| + /* Result was not subnormal. Perform rounding. */ |
| + /* For the fast version we discard the sticky bits and always round |
| + the halfway case up. */ |
| +24: |
| +#if defined(L_avr32_f64_mul) |
| + or r6, r6, r10 << 31 /* Or the parity bit into the sticky bits */ |
| + or r7, r7, r6 >> 1 /* Or in the sticky bits, keeping the msb |
| + of r7 as the halfway bit. */ |
| + eorh r7, 0x8000 /* Toggle halfway bit. */ |
| + /* We should now round up by adding one for the following cases: |
| + |
| + halfway sticky|parity round-up |
| + 0 x no |
| + 1 0 no |
| + 1 1 yes |
| + |
| + Since we have inverted the halfway bit we can use the satu instruction |
| + by saturating to 1 bit to implement this. |
| + */ |
| + satu r7 >> 0, 1 |
| +#else |
| + lsr r7, 31 |
| +#endif |
| + add r10, r7 |
| + acr r11 |
| + |
| + /* Insert sign bit*/ |
| + bld lr, 31 |
| + bst r11, 31 |
| + |
| + /* Return result in [r11,r10] */ |
| +#if defined(L_avr32_f64_mul) |
| + popm r4-r7, pc |
| +#else |
| + ldm sp++, r5, r6, r7,pc |
| +#endif |
| + |
| + |
| +__avr32_f64_mul_op1_subnormal: |
| + andh r11, 0x000f /* Remove sign bit and exponent */ |
| + clz r12, r10 /* Count leading zeros in lsw */ |
| + clz r6, r11 /* Count leading zeros in msw */ |
| + subcs r12, -32 + AVR32_F64_MUL_OP1_INT_BITS |
| + movcs r6, r12 |
| + subcc r6, AVR32_F64_MUL_OP1_INT_BITS |
| + cp.w r6, 32 |
| + brge 0f |
| + |
| + /* shifting involves both msw and lsw*/ |
| + rsub r12, r6, 32 /* shift mantissa */ |
| + lsl r11, r11, r6 |
| + lsr r12, r10, r12 |
| + or r11, r12 |
| + lsl r10, r10, r6 |
| + sub r6, 12-AVR32_F64_MUL_OP1_INT_BITS |
| + sub r7, r6 /* adjust exponent */ |
| + rjmp 22b /* Finished */ |
| +0: |
| + /* msw is zero so only need to consider lsw */ |
| + lsl r11, r10, r6 |
| + breq __avr32_f64_mul_res_zero |
| + mov r10, 0 |
| + sub r6, 12-AVR32_F64_MUL_OP1_INT_BITS |
| + sub r7, r6 /* adjust exponent */ |
| + rjmp 22b |
| + |
| + |
| +__avr32_f64_mul_op2_subnormal: |
| + andh r9, 0x000f /* Remove sign bit and exponent */ |
| + clz r12, r8 /* Count leading zeros in lsw */ |
| + clz r5, r9 /* Count leading zeros in msw */ |
| + subcs r12, -32 + AVR32_F64_MUL_OP2_INT_BITS |
| + movcs r5, r12 |
| + subcc r5, AVR32_F64_MUL_OP2_INT_BITS |
| + cp.w r5, 32 |
| + brge 0f |
| + |
| + /* shifting involves both msw and lsw*/ |
| + rsub r12, r5, 32 /* shift mantissa */ |
| + lsl r9, r9, r5 |
| + lsr r12, r8, r12 |
| + or r9, r12 |
| + lsl r8, r8, r5 |
| + sub r5, 12 - AVR32_F64_MUL_OP2_INT_BITS |
| + sub r6, r5 /* adjust exponent */ |
| + rjmp 23b /* Finished */ |
| +0: |
| + /* msw is zero so only need to consider lsw */ |
| + lsl r9, r8, r5 |
| + breq __avr32_f64_mul_res_zero |
| + mov r8, 0 |
| + sub r5, 12 - AVR32_F64_MUL_OP2_INT_BITS |
| + sub r6, r5 /* adjust exponent */ |
| + rjmp 23b |
| + |
| + |
| +__avr32_f64_mul_op_nan_or_inf: |
| + /* Same code for OP1 and OP2*/ |
| + /* Since we are here, at least one of the OPs were NaN or INF*/ |
| + andh r9, 0x000f /* Remove sign bit and exponent */ |
| + andh r11, 0x000f /* Remove sign bit and exponent */ |
| + /* Merge the regs in each operand to check for zero*/ |
| + or r11, r10 /* op1 */ |
| + or r9, r8 /* op2 */ |
| + /* Check if op1 is NaN or INF */ |
| + cp r7, 0x7ff |
| + brne __avr32_f64_mul_op1_not_naninf |
| + /* op1 was NaN or INF.*/ |
| + cp r11, 0 |
| + brne __avr32_f64_mul_res_nan /* op1 was NaN. Result will be NaN*/ |
| + /*op1 was INF. check if op2 is NaN or INF*/ |
| + cp r6, 0x7ff |
| + brne __avr32_f64_mul_res_inf /*op1 was INF, op2 was neither NaN nor INF*/ |
| + /* op1 is INF, op2 is either NaN or INF*/ |
| + cp r9, 0 |
| + breq __avr32_f64_mul_res_inf /*op2 was also INF*/ |
| + rjmp __avr32_f64_mul_res_nan /*op2 was NaN*/ |
| + |
| +__avr32_f64_mul_op1_not_naninf: |
| + /* op1 was not NaN nor INF. Then op2 must be NaN or INF*/ |
| + cp r9, 0 |
| + breq __avr32_f64_mul_res_inf /*op2 was INF, return INF*/ |
| + rjmp __avr32_f64_mul_res_nan /*else return NaN*/ |
| + |
| +__avr32_f64_mul_res_subnormal:/* Multiply result was subnormal. */ |
| +#if defined(L_avr32_f64_mul) |
| + /* Check how much we must scale down the mantissa. */ |
| + neg r12 |
| + sub r12, -1 /* We no longer have an implicit bit. */ |
| + satu r12 >> 0, 6 /* Saturate shift amount to max 63. */ |
| + cp.w r12, 32 |
| + brge 0f |
| + /* Shift amount <32 */ |
| + rsub r8, r12, 32 |
| + or r6, r7 |
| + lsr r7, r7, r12 |
| + lsl r9, r10, r8 |
| + or r7, r9 |
| + lsr r10, r10, r12 |
| + lsl r9, r11, r8 |
| + or r10, r9 |
| + lsr r11, r11, r12 |
| + rjmp 24b |
| +0: |
| + /* Shift amount >=32 */ |
| + rsub r8, r12, 32 |
| + moveq r9, 0 |
| + breq 0f |
| + lsl r9, r11, r8 |
| +0: |
| + or r6, r7 |
| + or r6, r6, r10 << 1 |
| + lsr r10, r10, r12 |
| + or r7, r9, r10 |
| + lsr r10, r11, r12 |
| + mov r11, 0 |
| + rjmp 24b |
| +#else |
| + /* Flush to zero for the fast version. */ |
| + mov r11, lr /*Get correct sign*/ |
| + andh r11, 0x8000, COH |
| + mov r10, 0 |
| + ldm sp++, r5, r6, r7,pc |
| +#endif |
| + |
| +__avr32_f64_mul_res_zero:/* Multiply result is zero. */ |
| + mov r11, lr /*Get correct sign*/ |
| + andh r11, 0x8000, COH |
| + mov r10, 0 |
| +#if defined(L_avr32_f64_mul) |
| + popm r4-r7, pc |
| +#else |
| + ldm sp++, r5, r6, r7,pc |
| +#endif |
| + |
| +__avr32_f64_mul_res_nan: /* Return NaN. */ |
| + mov r11, -1 |
| + mov r10, -1 |
| +#if defined(L_avr32_f64_mul) |
| + popm r4-r7, pc |
| +#else |
| + ldm sp++, r5, r6, r7,pc |
| +#endif |
| + |
| +__avr32_f64_mul_res_inf: /* Return INF. */ |
| + mov r11, 0xfff00000 |
| + bld lr, 31 |
| + bst r11, 31 |
| + mov r10, 0 |
| +#if defined(L_avr32_f64_mul) |
| + popm r4-r7, pc |
| +#else |
| + ldm sp++, r5, r6, r7,pc |
| +#endif |
| + |
| +__avr32_f64_mul_op1_zero: |
| + /* Get sign */ |
| + eor r11, r11, r9 |
| + andh r11, 0x8000, COH |
| + /* Check if op2 is Inf or NaN. */ |
| + bfextu r12, r9, 20, 11 |
| + cp.w r12, 0x7ff |
| + retne r12 /* Return 0.0 */ |
| + /* Return NaN */ |
| + mov r10, -1 |
| + mov r11, -1 |
| + ret r12 |
| + |
| + |
| + |
| +#endif |
| + |
| + |
| +#if defined(L_avr32_f64_addsub) || defined(L_avr32_f64_addsub_fast) |
| + .align 2 |
| + |
| +__avr32_f64_sub_from_add: |
| + /* Switch sign on op2 */ |
| + eorh r9, 0x8000 |
| + |
| +#if defined(L_avr32_f64_addsub_fast) |
| + .global __avr32_f64_sub_fast |
| + .type __avr32_f64_sub_fast,@function |
| +__avr32_f64_sub_fast: |
| +#else |
| + .global __avr32_f64_sub |
| + .type __avr32_f64_sub,@function |
| +__avr32_f64_sub: |
| +#endif |
| + |
| + /* op1 in {r11,r10}*/ |
| + /* op2 in {r9,r8}*/ |
| + |
| +#if defined(L_avr32_f64_addsub_fast) |
| + /* If op2 is zero just return op1 */ |
| + or r12, r8, r9 << 1 |
| + reteq r12 |
| +#endif |
| + |
| + /* Check signs */ |
| + eor r12, r11, r9 |
| + /* Different signs, use addition. */ |
| + brmi __avr32_f64_add_from_sub |
| + |
| + stm --sp, r5, r6, r7, lr |
| + |
| + /* Get sign of op1 into r12 */ |
| + mov r12, r11 |
| + andh r12, 0x8000, COH |
| + |
| + /* Remove sign from operands */ |
| + cbr r11, 31 |
| + cbr r9, 31 |
| + |
| + /* Put the largest number in [r11, r10] |
| + and the smallest number in [r9, r8] */ |
| + cp r10, r8 |
| + cpc r11, r9 |
| + brhs 1f /* Skip swap if operands already correctly ordered*/ |
| + /* Operands were not correctly ordered, swap them*/ |
| + mov r7, r11 |
| + mov r11, r9 |
| + mov r9, r7 |
| + mov r7, r10 |
| + mov r10, r8 |
| + mov r8, r7 |
| + eorh r12, 0x8000 /* Invert sign in r12*/ |
| +1: |
| + /* Unpack largest operand - opH */ |
| + /* exp: r7 */ |
| + /* sf: r11, r10 */ |
| + lsr r7, r11, 20 /* Extract exponent */ |
| + lsl r11, 11 /* Extract mantissa, leave room for implicit bit */ |
| + or r11, r11, r10>>21 |
| + lsl r10, 11 |
| + sbr r11, 31 /* Insert implicit bit */ |
| + |
| + |
| + /* Unpack smallest operand - opL */ |
| + /* exp: r6 */ |
| + /* sf: r9, r8 */ |
| + lsr r6, r9, 20 /* Extract exponent */ |
| + breq __avr32_f64_sub_opL_subnormal /* If either zero or subnormal */ |
| + lsl r9, 11 /* Extract mantissa, leave room for implicit bit */ |
| + or r9, r9, r8>>21 |
| + lsl r8, 11 |
| + sbr r9, 31 /* Insert implicit bit */ |
| + |
| + |
| +__avr32_f64_sub_opL_subnormal_done: |
| + /* opH is NaN or Inf. */ |
| + cp.w r7, 0x7ff |
| + breq __avr32_f64_sub_opH_nan_or_inf |
| + |
| + /* Get shift amount to scale mantissa of op2. */ |
| + rsub r6, r7 |
| + breq __avr32_f64_sub_shift_done /* No need to shift, exponents are equal*/ |
| + |
| + /* Scale mantissa [r9, r8] with amount [r6]. |
| + Uses scratch registers [r5] and [lr]. |
| + In IEEE mode:Must not forget the sticky bits we intend to shift out. */ |
| + |
| + rsub r5,r6,32 /* get (32 - shift count) |
| + (if shift count > 32 we get a |
| + negative value, but that will |
| + work as well in the code below.) */ |
| + |
| + cp.w r6,32 /* handle shifts >= 32 separately */ |
| + brhs __avr32_f64_sub_longshift |
| + |
| + /* small (<32) shift amount, both words are part of the shift |
| + first remember whether part that is lost contains any 1 bits ... */ |
| + lsl lr,r8,r5 /* shift away bits that are part of |
| + final mantissa. only part that goes |
| + to lr are bits that will be lost */ |
| + |
| + /* ... and now to the actual shift */ |
| + lsl r5,r9,r5 /* get bits from msw destined for lsw*/ |
| + lsr r8,r8,r6 /* shift down lsw of mantissa */ |
| + lsr r9,r9,r6 /* shift down msw of mantissa */ |
| + or r8,r5 /* combine these bits with prepared lsw*/ |
| +#if defined(L_avr32_f64_addsub) |
| + cp.w lr,0 /* if any '1' bit in part we lost ...*/ |
| + srne lr |
| + or r8, lr /* ... we need to set sticky bit*/ |
| +#endif |
| + |
| +__avr32_f64_sub_shift_done: |
| + /* Now subtract the mantissas. */ |
| + sub r10, r8 |
| + sbc r11, r11, r9 |
| + |
| + /* Normalize the exponent and mantissa pair stored in |
| + [r11,r10] and exponent in [r7]. Needs two scratch registers [r6] and [lr]. */ |
| + clz r6,r11 /* Check if we have zeros in high bits */ |
| + breq __avr32_f64_sub_longnormalize_done /* No need for scaling if no zeros in high bits */ |
| + brcs __avr32_f64_sub_longnormalize |
| + |
| + |
| + /* shift amount is smaller than 32, and involves both msw and lsw*/ |
| + rsub lr,r6,32 /* shift mantissa */ |
| + lsl r11,r11,r6 |
| + lsr lr,r10,lr |
| + or r11,lr |
| + lsl r10,r10,r6 |
| + |
| + sub r7,r6 /* adjust exponent */ |
| + brle __avr32_f64_sub_subnormal_result |
| +__avr32_f64_sub_longnormalize_done: |
| + |
| +#if defined(L_avr32_f64_addsub) |
| + /* Insert the bits we will remove from the mantissa r9[31:21] */ |
| + lsl r9, r10, (32 - 11) |
| +#else |
| + /* Keep the last bit shifted out. */ |
| + bfextu r9, r10, 10, 1 |
| +#endif |
| + |
| + /* Pack final result*/ |
| + /* Input: [r7]:exp, [r11, r10]:mant, [r12]:sign in MSB */ |
| + /* Result in [r11,r10] */ |
| + /* Insert mantissa */ |
| + lsr r10, 11 |
| + or r10, r10, r11<<21 |
| + lsr r11, 11 |
| + /* Insert exponent and sign bit*/ |
| + bfins r11, r7, 20, 11 |
| + or r11, r12 |
| + |
| + /* Round */ |
| +__avr32_f64_sub_round: |
| +#if defined(L_avr32_f64_addsub) |
| + mov_imm r7, 0x80000000 |
| + bld r10, 0 |
| + subne r7, -1 |
| + |
| + cp.w r9, r7 |
| + srhs r9 |
| +#endif |
| + add r10, r9 |
| + acr r11 |
| + |
| + /* Return result in [r11,r10] */ |
| + ldm sp++, r5, r6, r7,pc |
| + |
| + |
| + |
| +__avr32_f64_sub_opL_subnormal: |
| + /* Extract the mantissa */ |
| + lsl r9, 11 /* Extract mantissa, leave room for implicit bit */ |
| + or r9, r9, r8>>21 |
| + lsl r8, 11 |
| + |
| + /* Set exponent to 1 if we do not have a zero. */ |
| + or lr, r9, r8 |
| + movne r6,1 |
| + |
| + /* Check if opH is also subnormal. If so, clear implicit bit in r11*/ |
| + rsub lr, r7, 0 |
| + moveq r7,1 |
| + bst r11, 31 |
| + |
| + /* Check if opH is zero, if so set its exponent to 0. */ |
| + or lr, r11, r10 |
| + moveq r7,0 |
| + |
| + rjmp __avr32_f64_sub_opL_subnormal_done |
| + |
| +__avr32_f64_sub_opH_nan_or_inf: |
| + /* Check if opH is NaN, if so return NaN */ |
| + cbr r11, 31 |
| + or lr, r11, r10 |
| + brne __avr32_f64_sub_return_nan |
| + |
| + /* opH is Inf. */ |
| + /* Check if opL is Inf. or NaN */ |
| + cp.w r6, 0x7ff |
| + breq __avr32_f64_sub_return_nan |
| + /* Return infinity with correct sign. */ |
| + or r11, r12, r7 << 20 |
| + ldm sp++, r5, r6, r7, pc/* opL not Inf or NaN, return opH */ |
| +__avr32_f64_sub_return_nan: |
| + mov r10, -1 /* Generate NaN in r11, r10 */ |
| + mov r11, -1 |
| + ldm sp++, r5, r6, r7, pc/* opL Inf or NaN, return NaN */ |
| + |
| + |
| +__avr32_f64_sub_subnormal_result: |
| +#if defined(L_avr32_f64_addsub) |
| + /* Check how much we must scale down the mantissa. */ |
| + neg r7 |
| + sub r7, -1 /* We no longer have an implicit bit. */ |
| + satu r7 >> 0, 6 /* Saturate shift amount to max 63. */ |
| + cp.w r7, 32 |
| + brge 0f |
| + /* Shift amount <32 */ |
| + rsub r8, r7, 32 |
| + lsl r9, r10, r8 |
| + srne r6 |
| + lsr r10, r10, r7 |
| + or r10, r6 /* Sticky bit from the |
| + part that was shifted out. */ |
| + lsl r9, r11, r8 |
| + or r10, r10, r9 |
| + lsr r11, r11, r7 |
| + /* Set exponent */ |
| + mov r7, 0 |
| + rjmp __avr32_f64_sub_longnormalize_done |
| +0: |
| + /* Shift amount >=32 */ |
| + rsub r8, r7, 64 |
| + lsl r9, r11, r8 |
| + or r9, r10 |
| + srne r6 |
| + lsr r10, r11, r7 |
| + or r10, r6 /* Sticky bit from the |
| + part that was shifted out. */ |
| + mov r11, 0 |
| + /* Set exponent */ |
| + mov r7, 0 |
| + rjmp __avr32_f64_sub_longnormalize_done |
| +#else |
| + /* Just flush subnormals to zero. */ |
| + mov r10, 0 |
| + mov r11, 0 |
| +#endif |
| + ldm sp++, r5, r6, r7, pc |
| + |
| +__avr32_f64_sub_longshift: |
| + /* large (>=32) shift amount, only lsw will have bits left after shift. |
| + note that shift operations will use ((shift count=r6) mod 32) so |
| + we do not need to subtract 32 from shift count. */ |
| + /* Saturate the shift amount to 63. If the amount |
| + is any larger, op2 is insignificant. */ |
| + satu r6 >> 0, 6 |
| + |
| +#if defined(L_avr32_f64_addsub) |
| + /* first remember whether part that is lost contains any 1 bits ... */ |
| + moveq lr, r8 /* If shift amount is 32, no bits from msw are lost. */ |
| + breq 0f |
| + lsl lr,r9,r5 /* save all lost bits from msw */ |
| + or lr,r8 /* also save lost bits (all) from lsw |
| + now lr != 0 if we lose any bits */ |
| +#endif |
| +0: |
| + /* ... and now to the actual shift */ |
| + lsr r8,r9,r6 /* Move msw to lsw and shift. */ |
| + mov r9,0 /* clear msw */ |
| +#if defined(L_avr32_f64_addsub) |
| + cp.w lr,0 /* if any '1' bit in part we lost ...*/ |
| + srne lr |
| + or r8, lr /* ... we need to set sticky bit*/ |
| +#endif |
| + rjmp __avr32_f64_sub_shift_done |
| + |
| +__avr32_f64_sub_longnormalize: |
| + /* shift amount is greater than 32 */ |
| + clz r6,r10 /* shift mantissa */ |
| + /* If the resulting mantissa is zero the result is |
| + zero so force exponent to zero. */ |
| + movcs r7, 0 |
| + movcs r6, 0 |
| + movcs r12, 0 /* Also clear sign bit. A zero result from subtraction |
| + is always +0.0 */ |
| + subcc r6,-32 |
| + lsl r11,r10,r6 |
| + mov r10,0 |
| + sub r7,r6 /* adjust exponent */ |
| + brle __avr32_f64_sub_subnormal_result |
| + rjmp __avr32_f64_sub_longnormalize_done |
| + |
| + |
| + |
| + .align 2 |
| +__avr32_f64_add_from_sub: |
| + /* Switch sign on op2 */ |
| + eorh r9, 0x8000 |
| + |
| +#if defined(L_avr32_f64_addsub_fast) |
| + .global __avr32_f64_add_fast |
| + .type __avr32_f64_add_fast,@function |
| +__avr32_f64_add_fast: |
| +#else |
| + .global __avr32_f64_add |
| + .type __avr32_f64_add,@function |
| +__avr32_f64_add: |
| +#endif |
| + |
| + /* op1 in {r11,r10}*/ |
| + /* op2 in {r9,r8}*/ |
| + |
| +#if defined(L_avr32_f64_addsub_fast) |
| + /* If op2 is zero just return op1 */ |
| + or r12, r8, r9 << 1 |
| + reteq r12 |
| +#endif |
| + |
| + /* Check signs */ |
| + eor r12, r11, r9 |
| + /* Different signs, use subtraction. */ |
| + brmi __avr32_f64_sub_from_add |
| + |
| + stm --sp, r5, r6, r7, lr |
| + |
| + /* Get sign of op1 into r12 */ |
| + mov r12, r11 |
| + andh r12, 0x8000, COH |
| + |
| + /* Remove sign from operands */ |
| + cbr r11, 31 |
| + cbr r9, 31 |
| + |
| + /* Put the number with the largest exponent in [r11, r10] |
| + and the number with the smallest exponent in [r9, r8] */ |
| + cp r11, r9 |
| + brhs 1f /* Skip swap if operands already correctly ordered */ |
| + /* Operands were not correctly ordered, swap them */ |
| + mov r7, r11 |
| + mov r11, r9 |
| + mov r9, r7 |
| + mov r7, r10 |
| + mov r10, r8 |
| + mov r8, r7 |
| +1: |
| + mov lr, 0 /* Set sticky bits to zero */ |
| + /* Unpack largest operand - opH */ |
| + /* exp: r7 */ |
| + /* sf: r11, r10 */ |
| + bfextu R7, R11, 20, 11 /* Extract exponent */ |
| + bfextu r11, r11, 0, 20 /* Extract mantissa */ |
| + sbr r11, 20 /* Insert implicit bit */ |
| + |
| + /* Unpack smallest operand - opL */ |
| + /* exp: r6 */ |
| + /* sf: r9, r8 */ |
| + bfextu R6, R9, 20, 11 /* Extract exponent */ |
| + breq __avr32_f64_add_op2_subnormal |
| + bfextu r9, r9, 0, 20 /* Extract mantissa */ |
| + sbr r9, 20 /* Insert implicit bit */ |
| + |
| +2: |
| + /* opH is NaN or Inf. */ |
| + cp.w r7, 0x7ff |
| + breq __avr32_f64_add_opH_nan_or_inf |
| + |
| + /* Get shift amount to scale mantissa of op2. */ |
| + rsub r6, r7 |
| + breq __avr32_f64_add_shift_done /* No need to shift, exponents are equal*/ |
| + |
| + /* Scale mantissa [r9, r8] with amount [r6]. |
| + Uses scratch registers [r5] and [lr]. |
| + In IEEE mode:Must not forget the sticky bits we intend to shift out. */ |
| + rsub r5,r6,32 /* get (32 - shift count) |
| + (if shift count > 32 we get a |
| + negative value, but that will |
| + work as well in the code below.) */ |
| + |
| + cp.w r6,32 /* handle shifts >= 32 separately */ |
| + brhs __avr32_f64_add_longshift |
| + |
| + /* small (<32) shift amount, both words are part of the shift |
| + first remember whether part that is lost contains any 1 bits ... */ |
| + lsl lr,r8,r5 /* shift away bits that are part of |
| + final mantissa. only part that goes |
| + to lr are bits that will be lost */ |
| + |
| + /* ... and now to the actual shift */ |
| + lsl r5,r9,r5 /* get bits from msw destined for lsw*/ |
| + lsr r8,r8,r6 /* shift down lsw of mantissa */ |
| + lsr r9,r9,r6 /* shift down msw of mantissa */ |
| + or r8,r5 /* combine these bits with prepared lsw*/ |
| + |
| +__avr32_f64_add_shift_done: |
| + /* Now add the mantissas. */ |
| + add r10, r8 |
| + adc r11, r11, r9 |
| + |
| + /* Check if we overflowed. */ |
| + bld r11, 21 |
| + breq __avr32_f64_add_res_of |
| + |
| +__avr32_f64_add_res_of_done: |
| + |
| + /* Pack final result*/ |
| + /* Input: [r7]:exp, [r11, r10]:mant, [r12]:sign in MSB */ |
| + /* Result in [r11,r10] */ |
| + /* Insert exponent and sign bit*/ |
| + bfins r11, r7, 20, 11 |
| + or r11, r12 |
| + |
| + /* Round */ |
| +__avr32_f64_add_round: |
| +#if defined(L_avr32_f64_addsub) |
| + bfextu r12, r10, 0, 1 /* Extract parity bit.*/ |
| + or lr, r12 /* or it together with the sticky bits. */ |
| + eorh lr, 0x8000 /* Toggle round bit. */ |
| + /* We should now round up by adding one for the following cases: |
| + |
| + halfway sticky|parity round-up |
| + 0 x no |
| + 1 0 no |
| + 1 1 yes |
| + |
| + Since we have inverted the halfway bit we can use the satu instruction |
| + by saturating to 1 bit to implement this. |
| + */ |
| + satu lr >> 0, 1 |
| +#else |
| + lsr lr, 31 |
| +#endif |
| + add r10, lr |
| + acr r11 |
| + |
| + /* Return result in [r11,r10] */ |
| + ldm sp++, r5, r6, r7,pc |
| + |
| + |
| +__avr32_f64_add_opH_nan_or_inf: |
| + /* Check if opH is NaN, if so return NaN */ |
| + cbr r11, 20 |
| + or lr, r11, r10 |
| + brne __avr32_f64_add_return_nan |
| + |
| + /* opH is Inf. */ |
| + /* Check if opL is Inf. or NaN */ |
| + cp.w r6, 0x7ff |
| + breq __avr32_f64_add_opL_nan_or_inf |
| + ldm sp++, r5, r6, r7, pc/* opL not Inf or NaN, return opH */ |
| +__avr32_f64_add_opL_nan_or_inf: |
| + cbr r9, 20 |
| + or lr, r9, r8 |
| + brne __avr32_f64_add_return_nan |
| + mov r10, 0 /* Generate Inf in r11, r10 */ |
| + mov_imm r11, 0x7ff00000 |
| + ldm sp++, r5, r6, r7, pc/* opL Inf, return Inf */ |
| +__avr32_f64_add_return_nan: |
| + mov r10, -1 /* Generate NaN in r11, r10 */ |
| + mov r11, -1 |
| + ldm sp++, r5, r6, r7, pc/* opL Inf or NaN, return NaN */ |
| + |
| + |
| +__avr32_f64_add_longshift: |
| + /* large (>=32) shift amount, only lsw will have bits left after shift. |
| + note that shift operations will use ((shift count=r6) mod 32) so |
| + we do not need to subtract 32 from shift count. */ |
| + /* Saturate the shift amount to 63. If the amount |
| + is any larger, op2 is insignificant. */ |
| + satu r6 >> 0, 6 |
| + /* If shift amount is 32 there are no bits from the msw that are lost. */ |
| + moveq lr, r8 |
| + breq 0f |
| + /* first remember whether part that is lost contains any 1 bits ... */ |
| + lsl lr,r9,r5 /* save all lost bits from msw */ |
| +#if defined(L_avr32_f64_addsub) |
| + cp.w r8, 0 |
| + srne r8 |
| + or lr,r8 /* also save lost bits (all) from lsw |
| + now lr != 0 if we lose any bits */ |
| +#endif |
| +0: |
| + /* ... and now to the actual shift */ |
| + lsr r8,r9,r6 /* msw -> lsw and make rest of shift inside lsw*/ |
| + mov r9,0 /* clear msw */ |
| + rjmp __avr32_f64_add_shift_done |
| + |
| +__avr32_f64_add_res_of: |
| + /* We overflowed. Scale down mantissa by shifting right one position. */ |
| + or lr, lr, lr << 1 /* Remember stickybits*/ |
| + lsr r11, 1 |
| + ror r10 |
| + ror lr |
| + sub r7, -1 /* Increment exponent */ |
| + |
| + /* Clear mantissa to set result to Inf if the exponent is 255. */ |
| + cp.w r7, 0x7ff |
| + moveq r10, 0 |
| + moveq r11, 0 |
| + moveq lr, 0 |
| + rjmp __avr32_f64_add_res_of_done |
| + |
| +__avr32_f64_add_op2_subnormal: |
| + /* Set exponent to 1 */ |
| + mov r6, 1 |
| + |
| + /* Check if opH is also subnormal. */ |
| + cp.w r7, 0 |
| + brne 2b |
| + |
| + cbr r11, 20 |
| + /* Both operands are subnormal. Just add the mantissas |
| + and the exponent will automatically be set to 1 if |
| + we overflow into a normal number. */ |
| + add r10, r8 |
| + adc r11, r11, r9 |
| + |
| + /* Add sign bit */ |
| + or r11, r12 |
| + |
| + /* Return result in [r11,r10] */ |
| + ldm sp++, r5, r6, r7,pc |
| + |
| + |
| + |
| +#endif |
| + |
| +#ifdef L_avr32_f64_to_u32 |
| + /* This goes into L_fixdfsi */ |
| +#endif |
| + |
| + |
| +#ifdef L_avr32_f64_to_s32 |
| + .global __avr32_f64_to_u32 |
| + .type __avr32_f64_to_u32,@function |
| +__avr32_f64_to_u32: |
| + cp.w r11, 0 |
| + retmi 0 /* Negative returns 0 */ |
| + |
| + /* Fallthrough to df to signed si conversion */ |
| + .global __avr32_f64_to_s32 |
| + .type __avr32_f64_to_s32,@function |
| +__avr32_f64_to_s32: |
| + lsl r12,r11,1 |
| + lsr r12,21 /* extract exponent*/ |
| + sub r12,1023 /* convert to unbiased exponent.*/ |
| + retlo 0 /* a too-small exponent implies zero. */ |
| + |
| +1: |
| + rsub r12,r12,31 /* shift count = 31 - exponent */ |
| + mov r9,r11 /* save sign for later...*/ |
| + lsl r11,11 /* remove exponent and sign*/ |
| + sbr r11,31 /* add implicit bit*/ |
| + or r11,r11,r10>>21 /* get rest of bits from lsw of double */ |
| + lsr r11,r11,r12 /* shift down mantissa to final place */ |
| + lsl r9,1 /* sign -> carry */ |
| + retcc r11 /* if positive, we are done */ |
| + neg r11 /* if negative float, negate result */ |
| + ret r11 |
| + |
| +#endif /* L_fixdfsi*/ |
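| + |
| +/* The conversion above, restated as a C sketch over the raw IEEE-754 bit |
| +   pattern (f64_to_s32_sketch is a hypothetical helper; like the asm it |
| +   returns 0 for |x| < 1 and does not defend against overflow). */ |
| +#if 0 /* illustrative sketch, never assembled */ |
| +#include <stdint.h> |
| + |
| +static int32_t |
| +f64_to_s32_sketch (uint32_t hi, uint32_t lo) |
| +{ |
| +  int32_t exp = (int32_t) ((hi << 1) >> 21) - 1023;  /* unbiased exponent */ |
| +  if (exp < 0) |
| +    return 0;                                        /* |x| < 1.0 */ |
| + |
| +  /* Implicit bit plus the top 31 mantissa bits, then place the point. */ |
| +  uint32_t mant = (hi << 11) | 0x80000000u | (lo >> 21); |
| +  uint32_t res  = mant >> (31 - exp); |
| + |
| +  return (hi & 0x80000000u) ? -(int32_t) res : (int32_t) res; |
| +} |
| +#endif |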
| + |
| +#ifdef L_avr32_f64_to_u64 |
| + /* Actual function is in L_fixdfdi */ |
| +#endif |
| + |
| +#ifdef L_avr32_f64_to_s64 |
| + .global __avr32_f64_to_u64 |
| + .type __avr32_f64_to_u64,@function |
| +__avr32_f64_to_u64: |
| + cp.w r11,0 |
| + /* Negative numbers return zero */ |
| + movmi r10, 0 |
| + movmi r11, 0 |
| + retmi r11 |
| + |
| + |
| + |
| + /* Fallthrough */ |
| + .global __avr32_f64_to_s64 |
| + .type __avr32_f64_to_s64,@function |
| +__avr32_f64_to_s64: |
| + lsl r9,r11,1 |
| + lsr r9,21 /* get exponent*/ |
| + sub r9,1023 /* convert to correct range*/ |
| + /* Return zero if the exponent is too small */ |
| + movlo r10, 0 |
| + movlo r11, 0 |
| + retlo r11 |
| + |
| + mov r8,r11 /* save sign for later...*/ |
| +1: |
| + lsl r11,11 /* remove exponent */ |
| + sbr r11,31 /* add implicit bit*/ |
| + or r11,r11,r10>>21 /* get rest of bits from lsw of double*/ |
| + lsl r10,11 /* align lsw correctly as well */ |
| + rsub r9,r9,63 /* shift count = 63 - exponent */ |
| + breq 1f |
| + |
| + cp.w r9,32 /* is shift count more than one reg? */ |
| + brhs 0f |
| + |
| + mov r12,r11 /* save msw */ |
| + lsr r10,r10,r9 /* small shift count, shift down lsw */ |
| + lsr r11,r11,r9 /* small shift count, shift down msw */ |
| + rsub r9,r9,32 /* get 32-size of shifted out tail */ |
| + lsl r12,r12,r9 /* align part to move from msw to lsw */ |
| + or r10,r12 /* combine to get new lsw */ |
| + rjmp 1f |
| + |
| +0: |
| + lsr r10,r11,r9 /* large shift count, only lsw gets bits |
| + note that shift count is modulo 32*/ |
| + mov r11,0 /* msw will be 0 */ |
| + |
| +1: |
| + lsl r8,1 /* sign -> carry */ |
| + retcc r11 /* if positive, we are done */ |
| + |
| + neg r11 /* if negative float, negate result */ |
| + neg r10 |
| + scr r11 |
| + ret r11 |
| + |
| +#endif |
| + |
| +#ifdef L_avr32_u32_to_f64 |
| + /* Code located in L_floatsidf */ |
| +#endif |
| + |
| +#ifdef L_avr32_s32_to_f64 |
| + .global __avr32_u32_to_f64 |
| + .type __avr32_u32_to_f64,@function |
| +__avr32_u32_to_f64: |
| + sub r11, r12, 0 /* Move to r11 and force Z flag to be updated */ |
| + mov r12, 0 /* always positive */ |
| + rjmp 0f /* Jump to common code for floatsidf */ |
| + |
| + .global __avr32_s32_to_f64 |
| + .type __avr32_s32_to_f64,@function |
| +__avr32_s32_to_f64: |
| + mov r11, r12 /* Keep original value in r12 for sign */ |
| + abs r11 /* Absolute value of r12 */ |
| +0: |
| + mov r10,0 /* let remaining bits be zero */ |
| + reteq r11 /* zero long will return zero float */ |
| + |
| + pushm lr |
| + mov r9,31+1023 /* set exponent */ |
| + |
| + normalize_df r9 /*exp*/, r10, r11 /* mantissa */, r8, lr /* scratch */ |
| + |
| + /* Check if a subnormal result was created */ |
| + cp.w r9, 0 |
| + brgt 0f |
| + |
| + adjust_subnormal_df r9 /* exp */, r10, r11 /* Mantissa */, r12 /*sign*/, r8, lr /* scratch */ |
| + popm pc |
| +0: |
| + |
| + /* Round result */ |
| + round_df r9 /*exp*/, r10, r11 /* Mantissa */, r8 /*scratch*/ |
| + cp.w r9,0x7ff |
| + brlt 0f |
| + /*Return infinity */ |
| + mov r10, 0 |
| + mov_imm r11, 0xffe00000 |
| + rjmp __floatsidf_return_op1 |
| + |
| +0: |
| + |
| + /* Pack */ |
| + pack_df r9 /*exp*/, r10, r11 /* mantissa */, r10, r11 /* Output df number*/ |
| +__floatsidf_return_op1: |
| + lsl r12,1 /* shift in sign bit */ |
| + ror r11 |
| + |
| + popm pc |
| +#endif |
| + |
| + |
| +#ifdef L_avr32_f32_cmp_eq |
| + .global __avr32_f32_cmp_eq |
| + .type __avr32_f32_cmp_eq,@function |
| +__avr32_f32_cmp_eq: |
| + cp.w r12, r11 |
| + breq 0f |
| + /* If not equal check for +/-0 */ |
| + /* Or together the two values and shift out the sign bit. |
| + If the result is zero, then the two values are both zero. */ |
| + or r12, r11 |
| + lsl r12, 1 |
| + reteq 1 |
| + ret 0 |
| +0: |
| + /* Numbers were equal. Check for NaN or Inf */ |
| + mov_imm r11, 0xff000000 |
| + lsl r12, 1 |
| + cp.w r12, r11 |
| + retls 1 /* 0 if NaN, 1 otherwise */ |
| + ret 0 |
| +#endif |
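| + |
| +/* The same test as a C sketch on the raw single-precision bit patterns |
| +   (f32_cmp_eq_sketch is a hypothetical helper): equal bit patterns are |
| +   equal unless they encode a NaN, and differing patterns are only equal |
| +   in the +0 / -0 case. */ |
| +#if 0 /* illustrative sketch, never assembled */ |
| +#include <stdint.h> |
| + |
| +static int |
| +f32_cmp_eq_sketch (uint32_t a, uint32_t b) |
| +{ |
| +  if (a == b) |
| +    return (a << 1) <= 0xff000000u;  /* identical bits: 0 only for NaN */ |
| +  return ((a | b) << 1) == 0;        /* differing bits: only +0 == -0 */ |
| +} |
| +#endif |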
| + |
| +#if defined(L_avr32_f32_cmp_ge) || defined(L_avr32_f32_cmp_lt) |
| +#ifdef L_avr32_f32_cmp_ge |
| + .global __avr32_f32_cmp_ge |
| + .type __avr32_f32_cmp_ge,@function |
| +__avr32_f32_cmp_ge: |
| +#endif |
| +#ifdef L_avr32_f32_cmp_lt |
| + .global __avr32_f32_cmp_lt |
| + .type __avr32_f32_cmp_lt,@function |
| +__avr32_f32_cmp_lt: |
| +#endif |
| + lsl r10, r12, 1 /* Remove sign bits */ |
| + lsl r9, r11, 1 |
| + subfeq r10, 0 |
| +#ifdef L_avr32_f32_cmp_ge |
| + reteq 1 /* Both numbers are zero. Return true. */ |
| +#endif |
| +#ifdef L_avr32_f32_cmp_lt |
| + reteq 0 /* Both numbers are zero. Return false. */ |
| +#endif |
| + mov_imm r8, 0xff000000 |
| + cp.w r10, r8 |
| + rethi 0 /* Op0 is NaN */ |
| + cp.w r9, r8 |
| + rethi 0 /* Op1 is NaN */ |
| + |
| + eor r8, r11, r12 |
| + bld r12, 31 |
| +#ifdef L_avr32_f32_cmp_ge |
| + srcc r8 /* Set result to true if op0 is positive*/ |
| +#endif |
| +#ifdef L_avr32_f32_cmp_lt |
| + srcs r8 /* Set result to true if op0 is negative*/ |
| +#endif |
| + retmi r8 /* Return if signs are different */ |
| + brcs 0f /* Both signs negative? */ |
| + |
| + /* Both signs positive */ |
| + cp.w r12, r11 |
| +#ifdef L_avr32_f32_cmp_ge |
| + reths 1 |
| + retlo 0 |
| +#endif |
| +#ifdef L_avr32_f32_cmp_lt |
| + reths 0 |
| + retlo 1 |
| +#endif |
| +0: |
| + /* Both signs negative */ |
| + cp.w r11, r12 |
| +#ifdef L_avr32_f32_cmp_ge |
| + reths 1 |
| + retlo 0 |
| +#endif |
| +#ifdef L_avr32_f32_cmp_lt |
| + reths 0 |
| + retlo 1 |
| +#endif |
| +#endif |
| + |
| + |
| +#ifdef L_avr32_f64_cmp_eq |
| + .global __avr32_f64_cmp_eq |
| + .type __avr32_f64_cmp_eq,@function |
| +__avr32_f64_cmp_eq: |
| + cp.w r10,r8 |
| + cpc r11,r9 |
| + breq 0f |
| + |
| + /* Args were not equal*/ |
| + /* Both args could be zero with different sign bits */ |
| + lsl r11,1 /* get rid of sign bits */ |
| + lsl r9,1 |
| + or r11,r10 /* Check if all bits are zero */ |
| + or r11,r9 |
| + or r11,r8 |
| + reteq 1 /* If all bits are zero both arguments are zero, |
| + so return 1; otherwise return 0 */ |
| + ret 0 |
| +0: |
| + /* check for NaN */ |
| + lsl r11,1 |
| + mov_imm r12, 0xffe00000 |
| + cp.w r10,0 |
| + cpc r11,r12 /* check if nan or inf */ |
| + retls 1 /* If the argument is NaN return 0, else 1 */ |
| + ret 0 /* Return */ |
| + |
| +#endif |
| + |
| + |
| +#if defined(L_avr32_f64_cmp_ge) || defined(L_avr32_f64_cmp_lt) |
| + |
| +#ifdef L_avr32_f64_cmp_ge |
| + .global __avr32_f64_cmp_ge |
| + .type __avr32_f64_cmp_ge,@function |
| +__avr32_f64_cmp_ge: |
| +#endif |
| +#ifdef L_avr32_f64_cmp_lt |
| + .global __avr32_f64_cmp_lt |
| + .type __avr32_f64_cmp_lt,@function |
| +__avr32_f64_cmp_lt: |
| +#endif |
| + |
| + /* compare magnitude of op1 and op2 */ |
| + lsl r11,1 /* Remove sign bit of op1 */ |
| + srcs r12 /* Sign op1 to lsb of r12*/ |
| + subfeq r10, 0 |
| + breq 3f /* op1 zero */ |
| + lsl r9,1 /* Remove sign bit of op2 */ |
| + rol r12 /* Sign of op2 to lsb of r12, sign of op1 now in bit 1 of r12 */ |
| + |
| + |
| + /* Check for Nan */ |
| + pushm lr |
| + mov_imm lr, 0xffe00000 |
| + cp.w r10,0 |
| + cpc r11,lr |
| + brhi 0f /* We have NaN */ |
| + cp.w r8,0 |
| + cpc r9,lr |
| + brhi 0f /* We have NaN */ |
| + popm lr |
| + |
| + cp.w r12,3 /* both operands negative ?*/ |
| + breq 1f |
| + |
| + cp.w r12,1 /* both operands positive? */ |
| + brlo 2f |
| + |
| + /* Different signs. If sign of op1 is negative the difference |
| + between op1 and op2 will always be negative, and if op1 is |
| + positive the difference will always be positive */ |
| +#ifdef L_avr32_f64_cmp_ge |
| + reteq 1 |
| + retne 0 |
| +#endif |
| +#ifdef L_avr32_f64_cmp_lt |
| + reteq 0 |
| + retne 1 |
| +#endif |
| + |
| +2: |
| + /* Both operands positive. Just compute the difference */ |
| + cp.w r10,r8 |
| + cpc r11,r9 |
| +#ifdef L_avr32_f64_cmp_ge |
| + reths 1 |
| + retlo 0 |
| +#endif |
| +#ifdef L_avr32_f64_cmp_lt |
| + reths 0 |
| + retlo 1 |
| +#endif |
| + |
| +1: |
| + /* Both operands negative. Compute the difference with operands switched */ |
| + cp r8,r10 |
| + cpc r9,r11 |
| +#ifdef L_avr32_f64_cmp_ge |
| + reths 1 |
| + retlo 0 |
| +#endif |
| +#ifdef L_avr32_f64_cmp_lt |
| + reths 0 |
| + retlo 1 |
| +#endif |
| + |
| +0: |
| + popm pc, r12=0 |
| +#endif |
| + |
| +3: |
| + lsl r9,1 /* Remove sign bit of op2 */ |
| +#ifdef L_avr32_f64_cmp_ge |
| + srcs r12 /* If op2 is negative then op1 >= op2. */ |
| +#endif |
| +#ifdef L_avr32_f64_cmp_lt |
| + srcc r12 /* If op2 is positive then op1 <= op2. */ |
| +#endif |
| + subfeq r8, 0 |
| +#ifdef L_avr32_f64_cmp_ge |
| + reteq 1 /* Both operands are zero. Return true. */ |
| +#endif |
| +#ifdef L_avr32_f64_cmp_lt |
| + reteq 0 /* Both operands are zero. Return false. */ |
| +#endif |
| + ret r12 |
| + |
| + |
| +#if defined(L_avr32_f64_div) || defined(L_avr32_f64_div_fast) |
| + .align 2 |
| + |
| +#if defined(L_avr32_f64_div_fast) |
| + .global __avr32_f64_div_fast |
| + .type __avr32_f64_div_fast,@function |
| +__avr32_f64_div_fast: |
| +#else |
| + .global __avr32_f64_div |
| + .type __avr32_f64_div,@function |
| +__avr32_f64_div: |
| +#endif |
| + stm --sp, r0, r1, r2, r3, r4, r5, r6, r7,lr |
| + /* op1 in {r11,r10}*/ |
| + /* op2 in {r9,r8}*/ |
| + eor lr, r11, r9 /* MSB(lr) = Sign(op1) ^ Sign(op2) */ |
| + |
| + |
| + /* Unpack op1 to 2.62 format*/ |
| + /* exp: r7 */ |
| + /* sf: r11, r10 */ |
| + lsr r7, r11, 20 /* Extract exponent */ |
| + |
| + lsl r11, 9 /* Extract mantissa, leave room for implicit bit */ |
| + or r11, r11, r10>>23 |
| + lsl r10, 9 |
| + sbr r11, 29 /* Insert implicit bit */ |
| + andh r11, 0x3fff /*Mask last part of exponent since we use 2.62 format*/ |
| + |
| + cbr r7, 11 /* Clear sign bit */ |
| + /* Check if normalization is needed */ |
| + breq 11f /*If number is subnormal, normalize it */ |
| +22: |
| + cp r7, 0x7ff |
| + brge 2f /* Check op1 for NaN or Inf */ |
| + |
| + /* Unpack op2 to 2.62 format*/ |
| + /* exp: r6 */ |
| + /* sf: r9, r8 */ |
| + lsr r6, r9, 20 /* Extract exponent */ |
| + |
| + lsl r9, 9 /* Extract mantissa, leave room for implicit bit */ |
| + or r9, r9, r8>>23 |
| + lsl r8, 9 |
| + sbr r9, 29 /* Insert implicit bit */ |
| + andh r9, 0x3fff /*Mask last part of exponent since we use 2.62 format*/ |
| + |
| + cbr r6, 11 /* Clear sign bit */ |
| + /* Check if normalization is needed */ |
| + breq 13f /*If number is subnormal, normalize it */ |
| +23: |
| + cp r6, 0x7ff |
| + brge 3f /* Check op2 for NaN or Inf */ |
| + |
| + /* Calculate new exponent */ |
| + sub r7, r6 |
| + sub r7,-1023 |
| + |
| + /* Divide */ |
| + /* Approximating 1/d with the following recurrence: */ |
| + /* R[j+1] = R[j]*(2-R[j]*d) */ |
| + /* Using 2.62 format */ |
| + /* TWO: r12 */ |
| + /* d = op2 = divisor (2.62 format): r9,r8 */ |
| + /* Multiply result : r5, r4 */ |
| + /* Initial guess : r3, r2 */ |
| + /* New approximations : r3, r2 */ |
| + /* op1 = Dividend (2.62 format) : r11, r10 */ |
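| + /* The recurrence is Newton-Raphson iteration on f(R) = 1/R - d: |
| + R[j+1] = R[j] - f(R[j])/f'(R[j]) = R[j]*(2 - R[j]*d). Each step roughly |
| + squares the relative error, i.e. doubles the number of correct bits. */ |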
| + |
| + mov_imm r12, 0x80000000 |
| + |
| + /* Load initial guess, using look-up table */ |
| + /* Initial guess is of format 01.XY, where XY is constructed as follows: */ |
| + /* Let d be of following format: 00.1xy....., then XY=~xy */ |
| + /* For d=00.100 = 0.5 -> initial guess=01.11 = 1.75 */ |
| + /* For d=00.101 = 0.625 -> initial guess=01.10 = 1.5 */ |
| + /* For d=00.110 = 0.75 -> initial guess=01.01 = 1.25 */ |
| + /* For d=00.111 = 0.875 -> initial guess=01.00 = 1.0 */ |
| + /* r2 is also part of the reg pair forming the initial guess, but it */ |
| + /* is left uninitialized to save one cycle since it has so little significance. */ |
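| + /* Worked example (illustration only): for d = 0.75 = 00.110..., xy = 10, |
| + so XY = ~xy = 01 and the guess is 01.01 = 1.25. One iteration gives |
| + R = 1.25*(2 - 1.25*0.75) = 1.25*1.0625 = 1.328125, already close to |
| + 1/0.75 = 1.333..., and each further iteration roughly doubles the |
| + number of correct bits. */ |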
| + |
| + lsr r3, r12, 1 |
| + bfextu r4, r9, 27, 2 |
| + com r4 |
| + bfins r3, r4, 28, 2 |
| + |
| + /* First approximation */ |
| + /* Approximating to 32 bits */ |
| + /* r5 = R[j]*d */ |
| + mulu.d r4, r3, r9 |
| + /* r5 = 2-R[j]*d */ |
| + sub r5, r12, r5<<2 |
| + /* r3 = R[j]*(2-R[j]*d) */ |
| + mulu.d r4, r3, r5 |
| + lsl r3, r5, 2 |
| + |
| + /* Second approximation */ |
| + /* Approximating to 32 bits */ |
| + /* r5 = R[j]*d */ |
| + mulu.d r4, r3, r9 |
| + /* r5 = 2-R[j]*d */ |
| + sub r5, r12, r5<<2 |
| + /* r3 = R[j]*(2-R[j]*d) */ |
| + mulu.d r4, r3, r5 |
| + lsl r3, r5, 2 |
| + |
| + /* Third approximation */ |
| + /* Approximating to 32 bits */ |
| + /* r5 = R[j]*d */ |
| + mulu.d r4, r3, r9 |
| + /* r5 = 2-R[j]*d */ |
| + sub r5, r12, r5<<2 |
| + /* r3 = R[j]*(2-R[j]*d) */ |
| + mulu.d r4, r3, r5 |
| + lsl r3, r5, 2 |
| + |
| + /* Fourth approximation */ |
| + /* Approximating to 64 bits */ |
| + /* r5,r4 = R[j]*d */ |
| + mul_approx_df r3 /*ah*/, r2 /*al*/, r9 /*bh*/, r8 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/ |
| + lsl r5, 2 |
| + or r5, r5, r4>>30 |
| + lsl r4, 2 |
| + /* r5,r4 = 2-R[j]*d */ |
| + neg r4 |
| + sbc r5, r12, r5 |
| + /* r3,r2 = R[j]*(2-R[j]*d) */ |
| + mul_approx_df r3 /*ah*/, r2 /*al*/, r5 /*bh*/, r4 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/ |
| + lsl r3, r5, 2 |
| + or r3, r3, r4>>30 |
| + lsl r2, r4, 2 |
| + |
| + |
| + /* Fifth approximation */ |
| + /* Approximating to 64 bits */ |
| + /* r5,r4 = R[j]*d */ |
| + mul_approx_df r3 /*ah*/, r2 /*al*/, r9 /*bh*/, r8 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/ |
| + lsl r5, 2 |
| + or r5, r5, r4>>30 |
| + lsl r4, 2 |
| + /* r5,r4 = 2-R[j]*d */ |
| + neg r4 |
| + sbc r5, r12, r5 |
| + /* r3,r2 = R[j]*(2-R[j]*d) */ |
| + mul_approx_df r3 /*ah*/, r2 /*al*/, r5 /*bh*/, r4 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/ |
| + lsl r3, r5, 2 |
| + or r3, r3, r4>>30 |
| + lsl r2, r4, 2 |
| + |
| + |
| + /* Multiply with dividend to get quotient */ |
| + mul_approx_df r3 /*ah*/, r2 /*al*/, r11 /*bh*/, r10 /*bl*/, r3 /*rh*/, r2 /*rl*/, r1 /*sh*/, r0 /*sl*/ |
| + |
| + |
| + /* To increase speed, this result is not corrected before final rounding.*/ |
| + /* This may give a difference to IEEE compliant code of 1 ULP.*/ |
| + |
| + |
| + /* Adjust exponent and mantissa */ |
| + /* r7:exp, [r3, r2]:mant, [r5, r4]:scratch*/ |
| + /* Mantissa may be of the format 0.xxxx or 1.xxxx. */ |
| + /* In the first case, shift one position to the left. */ |
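| + /* (The quotient of two normalized mantissas in [1,2) always lies in |
| + (0.5,2), so at most one left shift is needed here.) */ |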
| + bld r3, 31-3 |
| + breq 0f |
| + lsl r2, 1 |
| + rol r3 |
| + sub r7, 1 |
| +#if defined(L_avr32_f64_div) |
| + /* We must scale down the dividend to 5.59 format. */ |
| + lsr r10, 3 |
| + or r10, r10, r11 << 29 |
| + lsr r11, 3 |
| + rjmp 1f |
| +#endif |
| +0: |
| +#if defined(L_avr32_f64_div) |
| + /* We must scale down the dividend to 6.58 format. */ |
| + lsr r10, 4 |
| + or r10, r10, r11 << 28 |
| + lsr r11, 4 |
| +1: |
| +#endif |
| + cp r7, 0 |
| + brle __avr32_f64_div_res_subnormal /* Result was subnormal. */ |
| + |
| + |
| +#if defined(L_avr32_f64_div) |
| + /* In order to round correctly we calculate the remainder: |
| + Remainder = dividend[r11:r10] - divisor[r9:r8]*quotient[r3:r2] |
| + for the case when the quotient is halfway between the round-up |
| + value and the round-down value. If the remainder then is negative |
| + it means that the quotient was too big and that it should not be |
| + rounded up; if the remainder is positive the quotient was too small |
| + and we need to round up. If the remainder is zero it means that the |
| + quotient is exact, but since we need to remove the guard bit we should |
| + round to even. */ |
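| + /* Small decimal illustration (not tied to the register layout above): |
| + computing 1/3 with 4 fraction bits gives the truncated quotient |
| + 0.0101b = 0.3125; the halfway value is 0.01011b = 0.34375 and the |
| + remainder 1 - 3*0.34375 = -0.03125 is negative, so we do not round up, |
| + which is correct since 1/3 is closer to 0.3125 than to 0.0110b = 0.375. */ |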
| + |
| + /* Truncate and add guard bit. */ |
| + andl r2, 0xff00 |
| + orl r2, 0x0080 |
| + |
| + |
| + /* Now do the multiplication. The quotient has the format 4.60 |
| + while the divisor has the format 2.62 which gives a result |
| + of 6.58 */ |
| + mulu.d r0, r3, r8 |
| + macu.d r0, r2, r9 |
| + mulu.d r4, r2, r8 |
| + mulu.d r8, r3, r9 |
| + add r5, r0 |
| + adc r8, r8, r1 |
| + acr r9 |
| + |
| + |
| + /* Check if remainder is positive, negative or equal. */ |
| + bfextu r12, r2, 8, 1 /* Get parity bit into bit 0 of r12 */ |
| + cp r4, 0 |
| + cpc r5 |
| +__avr32_f64_div_round_subnormal: |
| + cpc r8, r10 |
| + cpc r9, r11 |
| + srlo r6 /* Remainder positive: we need to round up.*/ |
| + moveq r6, r12 /* Remainder zero: round up if mantissa odd. */ |
| +#else |
| + bfextu r6, r2, 7, 1 /* Get guard bit */ |
| +#endif |
| + /* Final packing, scale down mantissa. */ |
| + lsr r10, r2, 8 |
| + or r10, r10, r3<<24 |
| + lsr r11, r3, 8 |
| + /* Insert exponent and sign bit*/ |
| + bfins r11, r7, 20, 11 |
| + bld lr, 31 |
| + bst r11, 31 |
| + |
| + /* Final rounding */ |
| + add r10, r6 |
| + acr r11 |
| + |
| + /* Return result in [r11,r10] */ |
| + ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc |
| + |
| + |
| +2: |
| + /* Op1 is NaN or inf */ |
| + andh r11, 0x000f /* Extract mantissa */ |
| + or r11, r10 |
| + brne 16f /* Return NaN if op1 is NaN */ |
| + /* Op1 is inf check op2 */ |
| + lsr r6, r9, 20 /* Extract exponent */ |
| + cbr r6, 8 /* Clear sign bit */ |
| + cp r6, 0x7ff |
| + brne 17f /* Inf/number gives inf, return inf */ |
| + rjmp 16f /* The rest gives NaN*/ |
| + |
| +3: |
| + /* Op1 is a valid number. Op 2 is NaN or inf */ |
| + andh r9, 0x000f /* Extract mantissa */ |
| + or r9, r8 |
| + brne 16f /* Return NaN if op2 is NaN */ |
| + rjmp 15f /* Op2 was inf, return zero*/ |
| + |
| +11: /* Op1 was denormal. Fix it. */ |
| + lsl r11, 3 |
| + or r11, r11, r10 >> 29 |
| + lsl r10, 3 |
| + /* Check if op1 is zero. */ |
| + or r4, r10, r11 |
| + breq __avr32_f64_div_op1_zero |
| + normalize_df r7 /*exp*/, r10, r11 /*Mantissa*/, r4, r5 /*scratch*/ |
| + lsr r10, 2 |
| + or r10, r10, r11 << 30 |
| + lsr r11, 2 |
| + rjmp 22b |
| + |
| + |
| +13: /* Op2 was denormal. Fix it */ |
| + lsl r9, 3 |
| + or r9, r9, r8 >> 29 |
| + lsl r8, 3 |
| + /* Check if op2 is zero. */ |
| + or r4, r9, r8 |
| + breq 17f /* Divisor is zero -> return Inf */ |
| + normalize_df r6 /*exp*/, r8, r9 /*Mantissa*/, r4, r5 /*scratch*/ |
| + lsr r8, 2 |
| + or r8, r8, r9 << 30 |
| + lsr r9, 2 |
| + rjmp 23b |
| + |
| + |
| +__avr32_f64_div_res_subnormal:/* Divide result was subnormal. */ |
| +#if defined(L_avr32_f64_div) |
| + /* Check how much we must scale down the mantissa. */ |
| + neg r7 |
| + sub r7, -1 /* We no longer have an implicit bit. */ |
| + satu r7 >> 0, 6 /* Saturate shift amount to max 63. */ |
| + cp.w r7, 32 |
| + brge 0f |
| + /* Shift amount <32 */ |
| + /* Scale down quotient */ |
| + rsub r6, r7, 32 |
| + lsr r2, r2, r7 |
| + lsl r12, r3, r6 |
| + or r2, r12 |
| + lsr r3, r3, r7 |
| + /* Scale down the dividend to match the scaling of the quotient. */ |
| + lsl r1, r10, r6 |
| + lsr r10, r10, r7 |
| + lsl r12, r11, r6 |
| + or r10, r12 |
| + lsr r11, r11, r7 |
| + mov r0, 0 |
| + rjmp 1f |
| +0: |
| + /* Shift amount >=32 */ |
| + rsub r6, r7, 32 |
| + moveq r0, 0 |
| + moveq r12, 0 |
| + breq 0f |
| + lsl r0, r10, r6 |
| + lsl r12, r11, r6 |
| +0: |
| + lsr r2, r3, r7 |
| + mov r3, 0 |
| + /* Scale down the dividend to match the scaling of the quotient. */ |
| + lsr r1, r10, r7 |
| + or r1, r12 |
| + lsr r10, r11, r7 |
| + mov r11, 0 |
| +1: |
| + /* Start performing the same rounding as done for normal numbers |
| + but this time we have scaled the quotient and dividend and hence |
| + need a slightly different comparison. */ |
| + /* Truncate and add guard bit. */ |
| + andl r2, 0xff00 |
| + orl r2, 0x0080 |
| + |
| + /* Now do the multiplication. */ |
| + mulu.d r6, r3, r8 |
| + macu.d r6, r2, r9 |
| + mulu.d r4, r2, r8 |
| + mulu.d r8, r3, r9 |
| + add r5, r6 |
| + adc r8, r8, r7 |
| + acr r9 |
| + |
| + /* Set exponent to 0 */ |
| + mov r7, 0 |
| + |
| + /* Check if remainder is positive, negative or equal. */ |
| + bfextu r12, r2, 8, 1 /* Get parity bit into bit 0 of r12 */ |
| + cp r4, r0 |
| + cpc r5, r1 |
| + /* Now the rest of the rounding is the same as for normals. */ |
| + rjmp __avr32_f64_div_round_subnormal |
| + |
| +#endif |
| +15: |
| + /* Flush to zero for the fast version. */ |
| + mov r11, lr /*Get correct sign*/ |
| + andh r11, 0x8000, COH |
| + mov r10, 0 |
| + ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc |
| + |
| +16: /* Return NaN. */ |
| + mov r11, -1 |
| + mov r10, -1 |
| + ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc |
| + |
| +17: /* Return INF. */ |
| + mov r11, lr /*Get correct sign*/ |
| + andh r11, 0x8000, COH |
| + orh r11, 0x7ff0 |
| + mov r10, 0 |
| + ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc |
| + |
| +__avr32_f64_div_op1_zero: |
| + or r5, r8, r9 << 1 |
| + breq 16b /* 0.0/0.0 -> NaN */ |
| + bfextu r4, r9, 20, 11 |
| + cp r4, 0x7ff |
| + brne 15b /* Return zero */ |
| + /* Check if divisor is Inf or NaN */ |
| + or r5, r8, r9 << 12 |
| + breq 15b /* Divisor is inf -> return zero */ |
| + rjmp 16b /* Return NaN */ |
| + |
| + |
| + |
| + |
| +#endif |
| + |
| +#if defined(L_avr32_f32_addsub) || defined(L_avr32_f32_addsub_fast) |
| + |
| + .align 2 |
| +__avr32_f32_sub_from_add: |
| + /* Switch sign on op2 */ |
| + eorh r11, 0x8000 |
| + |
| +#if defined(L_avr32_f32_addsub_fast) |
| + .global __avr32_f32_sub_fast |
| + .type __avr32_f32_sub_fast,@function |
| +__avr32_f32_sub_fast: |
| +#else |
| + .global __avr32_f32_sub |
| + .type __avr32_f32_sub,@function |
| +__avr32_f32_sub: |
| +#endif |
| + |
| + /* Check signs */ |
| + eor r8, r11, r12 |
| + /* Different signs, use subtraction. */ |
| + brmi __avr32_f32_add_from_sub |
| + |
| + /* Get sign of op1 */ |
| + mov r8, r12 |
| + andh r12, 0x8000, COH |
| + |
| + /* Remove sign from operands */ |
| + cbr r11, 31 |
| +#if defined(L_avr32_f32_addsub_fast) |
| + reteq r8 /* If op2 is zero return op1 */ |
| +#endif |
| + cbr r8, 31 |
| + |
| + /* Put the number with the largest exponent in r10 |
| + and the number with the smallest exponent in r9 */ |
| + max r10, r8, r11 |
| + min r9, r8, r11 |
| + cp r10, r8 /*If largest operand (in R10) is not equal to op1*/ |
| + subne r12, 1 /* Subtract 1 from sign, which will invert MSB of r12*/ |
| + andh r12, 0x8000, COH /*Mask all but MSB*/ |
| + |
| + /* Unpack exponent and mantissa of op1 */ |
| + lsl r8, r10, 8 |
| + sbr r8, 31 /* Set implicit bit. */ |
| + lsr r10, 23 |
| + |
| + /* op1 is NaN or Inf. */ |
| + cp.w r10, 0xff |
| + breq __avr32_f32_sub_op1_nan_or_inf |
| + |
| + /* Unpack exponent and mantissa of op2 */ |
| + lsl r11, r9, 8 |
| + sbr r11, 31 /* Set implicit bit. */ |
| + lsr r9, 23 |
| + |
| +#if defined(L_avr32_f32_addsub) |
| + /* Keep sticky bit for correct IEEE rounding */ |
| + st.w --sp, r12 |
| + |
| + /* op2 is either zero or subnormal. */ |
| + breq __avr32_f32_sub_op2_subnormal |
| +0: |
| + /* Get shift amount to scale mantissa of op2. */ |
| + sub r12, r10, r9 |
| + |
| + breq __avr32_f32_sub_shift_done |
| + |
| + /* Saturate the shift amount to 31. If the amount |
| + is any larger op2 is insignificant. */ |
| + satu r12 >> 0, 5 |
| + |
| + /* Put the remaining bits into r9.*/ |
| + rsub r9, r12, 32 |
| + lsl r9, r11, r9 |
| + |
| + /* If the remaining bits are non-zero then we must subtract one |
| + more from the larger mantissa (r8). */ |
| + subne r8, 1 |
| + srne r9 /* LSB of r9 represents sticky bits. */ |
| + |
| + /* Shift mantissa of op2 to same decimal point as the mantissa |
| + of op1. */ |
| + lsr r11, r11, r12 |
| + |
| + |
| +__avr32_f32_sub_shift_done: |
| + /* Now subtract the mantissas. */ |
| + sub r8, r11 |
| + |
| + ld.w r12, sp++ |
| + |
| + /* Normalize resulting mantissa. */ |
| + clz r11, r8 |
| + |
| + retcs 0 |
| + lsl r8, r8, r11 |
| + sub r10, r11 |
| + brle __avr32_f32_sub_subnormal_result |
| + |
| + /* Insert the bits we will remove from the mantissa into r9[31:24] */ |
| + or r9, r9, r8 << 24 |
| +#else |
| + /* Ignore sticky bit to simplify and speed up rounding */ |
| + /* op2 is either zero or subnormal. */ |
| + breq __avr32_f32_sub_op2_subnormal |
| +0: |
| + /* Get shift amount to scale mantissa of op2. */ |
| + rsub r9, r10 |
| + |
| + /* Saturate the shift amount to 31. If the amount |
| + is any larger op2 is insignificant. */ |
| + satu r9 >> 0, 5 |
| + |
| + /* Shift mantissa of op2 to same decimal point as the mantissa |
| + of op1. */ |
| + lsr r11, r11, r9 |
| + |
| + /* Now subtract the mantissas. */ |
| + sub r8, r11 |
| + |
| + /* Normalize resulting mantissa. */ |
| + clz r9, r8 |
| + retcs 0 |
| + lsl r8, r8, r9 |
| + sub r10, r9 |
| + brle __avr32_f32_sub_subnormal_result |
| +#endif |
| + |
| + /* Pack result. */ |
| + or r12, r12, r8 >> 8 |
| + bfins r12, r10, 23, 8 |
| + |
| + /* Round */ |
| +__avr32_f32_sub_round: |
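| + /* Descriptive note on the rounding below: r9 holds the bits shifted |
| + out of the mantissa. Round up when they exceed half an ulp |
| + (0x80000000), or when they equal half an ulp and the mantissa lsb is |
| + odd (round to nearest, ties to even). The fast variant simply adds |
| + the guard bit. */ |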
| +#if defined(L_avr32_f32_addsub) |
| + mov_imm r10, 0x80000000 |
| + bld r12, 0 |
| + subne r10, -1 |
| + cp.w r9, r10 |
| + subhs r12, -1 |
| +#else |
| + bld r8, 7 |
| + acr r12 |
| +#endif |
| + |
| + ret r12 |
| + |
| + |
| +__avr32_f32_sub_op2_subnormal: |
| + /* Fix implicit bit and adjust exponent of subnormals. */ |
| + cbr r11, 31 |
| + /* Set exponent to 1 if we do not have a zero. */ |
| + movne r9,1 |
| + |
| + /* Check if op1 is also subnormal. */ |
| + cp.w r10, 0 |
| + brne 0b |
| + |
| + cbr r8, 31 |
| + /* If op1 is not zero set exponent to 1. */ |
| + movne r10,1 |
| + |
| + rjmp 0b |
| + |
| +__avr32_f32_sub_op1_nan_or_inf: |
| + /* Check if op1 is NaN, if so return NaN */ |
| + lsl r11, r8, 1 |
| + retne -1 |
| + |
| + /* op1 is Inf. */ |
| + bfins r12, r10, 23, 8 /* Generate Inf in r12 */ |
| + |
| + /* Check if op2 is Inf. or NaN */ |
| + lsr r11, r9, 23 |
| + cp.w r11, 0xff |
| + retne r12 /* op2 not Inf or NaN, return op1 */ |
| + |
| + ret -1 /* op2 Inf or NaN, return NaN */ |
| + |
| +__avr32_f32_sub_subnormal_result: |
| + /* Check if the number is so small that |
| + it will be represented as zero. */ |
| + rsub r10, r10, 9 |
| + rsub r11, r10, 32 |
| + retcs 0 |
| + |
| + /* Shift the mantissa into the correct position.*/ |
| + lsr r10, r8, r10 |
| + /* Add sign bit. */ |
| + or r12, r10 |
| + |
| + /* Put the shifted out bits in the most significant part |
| + of r8. */ |
| + lsl r8, r8, r11 |
| + |
| +#if defined(L_avr32_f32_addsub) |
| + /* Add all the remainder bits used for rounding into r9 */ |
| + or r9, r8 |
| +#else |
| + lsr r8, 24 |
| +#endif |
| + rjmp __avr32_f32_sub_round |
| + |
| + |
| + .align 2 |
| + |
| +__avr32_f32_add_from_sub: |
| + /* Switch sign on op2 */ |
| + eorh r11, 0x8000 |
| + |
| +#if defined(L_avr32_f32_addsub_fast) |
| + .global __avr32_f32_add_fast |
| + .type __avr32_f32_add_fast,@function |
| +__avr32_f32_add_fast: |
| +#else |
| + .global __avr32_f32_add |
| + .type __avr32_f32_add,@function |
| +__avr32_f32_add: |
| +#endif |
| + |
| + /* Check signs */ |
| + eor r8, r11, r12 |
| + /* Different signs, use subtraction. */ |
| + brmi __avr32_f32_sub_from_add |
| + |
| + /* Get sign of op1 */ |
| + mov r8, r12 |
| + andh r12, 0x8000, COH |
| + |
| + /* Remove sign from operands */ |
| + cbr r11, 31 |
| +#if defined(L_avr32_f32_addsub_fast) |
| + reteq r8 /* If op2 is zero return op1 */ |
| +#endif |
| + cbr r8, 31 |
| + |
| + /* Put the number with the largest exponent in r10 |
| + and the number with the smallest exponent in r9 */ |
| + max r10, r8, r11 |
| + min r9, r8, r11 |
| + |
| + /* Unpack exponent and mantissa of op1 */ |
| + lsl r8, r10, 8 |
| + sbr r8, 31 /* Set implicit bit. */ |
| + lsr r10, 23 |
| + |
| + /* op1 is NaN or Inf. */ |
| + cp.w r10, 0xff |
| + breq __avr32_f32_add_op1_nan_or_inf |
| + |
| + /* Unpack exponent and mantissa of op2 */ |
| + lsl r11, r9, 8 |
| + sbr r11, 31 /* Set implicit bit. */ |
| + lsr r9, 23 |
| + |
| +#if defined(L_avr32_f32_addsub) |
| + /* op2 is either zero or subnormal. */ |
| + breq __avr32_f32_add_op2_subnormal |
| +0: |
| + /* Keep sticky bit for correct IEEE rounding */ |
| + st.w --sp, r12 |
| + |
| + /* Get shift amount to scale mantissa of op2. */ |
| + rsub r9, r10 |
| + |
| + /* Saturate the shift amount to 31. If the amount |
| + is any larger op2 is insignificant. */ |
| + satu r9 >> 0, 5 |
| + |
| + /* Shift mantissa of op2 to same decimal point as the mantissa |
| + of op1. */ |
| + lsr r12, r11, r9 |
| + |
| + /* Put the remaining bits into r11[23:..]. */ |
| + rsub r9, r9, (32-8) |
| + lsl r11, r11, r9 |
| + /* Insert the bits we will remove from the mantissa into r11[31:24] */ |
| + bfins r11, r12, 24, 8 |
| + |
| + /* Now add the mantissas. */ |
| + add r8, r12 |
| + |
| + ld.w r12, sp++ |
| +#else |
| + /* Ignore sticky bit to simplify and speed up rounding */ |
| + /* op2 is either zero or subnormal. */ |
| + breq __avr32_f32_add_op2_subnormal |
| +0: |
| + /* Get shift amount to scale mantissa of op2. */ |
| + rsub r9, r10 |
| + |
| + /* Saturate the shift amount to 31. If the amount |
| + is any larger op2 is insignificant. */ |
| + satu r9 >> 0, 5 |
| + |
| + /* Shift mantissa of op2 to same decimal point as the mantissa |
| + of op1. */ |
| + lsr r11, r11, r9 |
| + |
| + /* Now add the mantissas. */ |
| + add r8, r11 |
| + |
| +#endif |
| + /* Check if we overflowed. */ |
| + brcs __avr32_f32_add_res_of |
| +1: |
| + /* Pack result. */ |
| + or r12, r12, r8 >> 8 |
| + bfins r12, r10, 23, 8 |
| + |
| + /* Round */ |
| +#if defined(L_avr32_f32_addsub) |
| + mov_imm r10, 0x80000000 |
| + bld r12, 0 |
| + subne r10, -1 |
| + cp.w r11, r10 |
| + subhs r12, -1 |
| +#else |
| + bld r8, 7 |
| + acr r12 |
| +#endif |
| + |
| + ret r12 |
| + |
| +__avr32_f32_add_op2_subnormal: |
| + /* Fix implicit bit and adjust exponent of subnormals. */ |
| + cbr r11, 31 |
| + /* Set exponent to 1 if we do not have a zero. */ |
| + movne r9,1 |
| + |
| + /* Check if op1 is also subnormal. */ |
| + cp.w r10, 0 |
| + brne 0b |
| + /* Both operands subnormal, just add the mantissas and |
| + pack. If the addition of the subnormal numbers results |
| + in a normal number then the exponent will automatically |
| + be set to 1 by the addition. */ |
| + cbr r8, 31 |
| + add r11, r8 |
| + or r12, r12, r11 >> 8 |
| + ret r12 |
| + |
| +__avr32_f32_add_op1_nan_or_inf: |
| + /* Check if op1 is NaN, if so return NaN */ |
| + lsl r11, r8, 1 |
| + retne -1 |
| + |
| + /* op1 is Inf. */ |
| + bfins r12, r10, 23, 8 /* Generate Inf in r12 */ |
| + |
| + /* Check if op2 is Inf. or NaN */ |
| + lsr r11, r9, 23 |
| + cp.w r11, 0xff |
| + retne r12 /* op2 not Inf or NaN, return op1 */ |
| + |
| + lsl r9, 9 |
| + reteq r12 /* op2 Inf return op1 */ |
| + ret -1 /* op2 is NaN, return NaN */ |
| + |
| +__avr32_f32_add_res_of: |
| + /* We overflowed. Increase exponent and shift mantissa.*/ |
| + lsr r8, 1 |
| + sub r10, -1 |
| + |
| + /* Clear mantissa to set result to Inf if the exponent is 255. */ |
| + cp.w r10, 255 |
| + moveq r8, 0 |
| + moveq r11, 0 |
| + rjmp 1b |
| + |
| + |
| +#endif |
| + |
| + |
| +#if defined(L_avr32_f32_div) || defined(L_avr32_f32_div_fast) |
| + .align 2 |
| + |
| +#if defined(L_avr32_f32_div_fast) |
| + .global __avr32_f32_div_fast |
| + .type __avr32_f32_div_fast,@function |
| +__avr32_f32_div_fast: |
| +#else |
| + .global __avr32_f32_div |
| + .type __avr32_f32_div,@function |
| +__avr32_f32_div: |
| +#endif |
| + |
| + eor r8, r11, r12 /* MSB(r8) = Sign(op1) ^ Sign(op2) */ |
| + |
| + /* Unpack */ |
| + lsl r12,1 |
| + reteq 0 /* Return zero if op1 is zero */ |
| + lsl r11,1 |
| + breq 4f /* Check op2 for zero */ |
| + |
| + /* Unpack op1*/ |
| + /* exp: r9 */ |
| + /* sf: r12 */ |
| + lsr r9, r12, 24 |
| + breq 11f /*If number is subnormal*/ |
| + cp r9, 0xff |
| + brhs 2f /* Check op1 for NaN or Inf */ |
| + lsl r12, 7 |
| + sbr r12, 31 /*Implicit bit*/ |
| +12: |
| + |
| + /* Unpack op2*/ |
| + /* exp: r10 */ |
| + /* sf: r11 */ |
| + lsr r10, r11, 24 |
| + breq 13f /*If number is subnormal*/ |
| + cp r10, 0xff |
| + brhs 3f /* Check op2 for NaN or Inf */ |
| + |
| + lsl r11,7 |
| + sbr r11, 31 /*Implicit bit*/ |
| +14: |
| + |
| + /* For UC3, store with predecrement is faster than stm */ |
| + st.w --sp, r5 |
| + st.d --sp, r6 |
| + |
| + /* Calculate new exponent */ |
| + sub r9, r10 |
| + sub r9,-127 |
| + |
| + /* Divide */ |
| + /* Approximating 1/d with the following recurrence: */ |
| + /* R[j+1] = R[j]*(2-R[j]*d) */ |
| + /* Using 2.30 format */ |
| + /* TWO: r10 */ |
| + /* d: r5 */ |
| + /* Multiply result : r6, r7 */ |
| + /* Initial guess : r11 */ |
| + /* New approximations : r11 */ |
| + /* Dividend : r12 */ |
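| + /* Note: the 2-bit table guess is accurate to a few bits and each |
| + iteration roughly doubles the number of correct bits, so the four |
| + iterations below should reach well beyond the 24 bits of a |
| + single-precision mantissa. */ |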
| + |
| + /* Load TWO */ |
| + mov_imm r10, 0x80000000 |
| + |
| + lsr r12, 2 /* Get significand of Op1 in 2.30 format */ |
| + lsr r5, r11, 2 /* Get significand of Op2 (=d) in 2.30 format */ |
| + |
| + /* Load initial guess, using look-up table */ |
| + /* Initial guess is of format 01.XY, where XY is constructed as follows: */ |
| + /* Let d be of following format: 00.1xy....., then XY=~xy */ |
| + /* For d=00.100 = 0.5 -> initial guess=01.11 = 1.75 */ |
| + /* For d=00.101 = 0.625 -> initial guess=01.10 = 1.5 */ |
| + /* For d=00.110 = 0.75 -> initial guess=01.01 = 1.25 */ |
| + /* For d=00.111 = 0.875 -> initial guess=01.00 = 1.0 */ |
| + |
| + lsr r11, r10, 1 |
| + bfextu r6, r5, 27, 2 |
| + com r6 |
| + bfins r11, r6, 28, 2 |
| + |
| + /* First approximation */ |
| + /* r7 = R[j]*d */ |
| + mulu.d r6, r11, r5 |
| + /* r7 = 2-R[j]*d */ |
| + sub r7, r10, r7<<2 |
| + /* r11 = R[j]*(2-R[j]*d) */ |
| + mulu.d r6, r11, r7 |
| + lsl r11, r7, 2 |
| + |
| + /* Second approximation */ |
| + /* r7 = R[j]*d */ |
| + mulu.d r6, r11, r5 |
| + /* r7 = 2-R[j]*d */ |
| + sub r7, r10, r7<<2 |
| + /* r11 = R[j]*(2-R[j]*d) */ |
| + mulu.d r6, r11, r7 |
| + lsl r11, r7, 2 |
| + |
| + /* Third approximation */ |
| + /* r7 = R[j]*d */ |
| + mulu.d r6, r11, r5 |
| + /* r7 = 2-R[j]*d */ |
| + sub r7, r10, r7<<2 |
| + /* r11 = R[j]*(2-R[j]*d) */ |
| + mulu.d r6, r11, r7 |
| + lsl r11, r7, 2 |
| + |
| + /* Fourth approximation */ |
| + /* r7 = R[j]*d */ |
| + mulu.d r6, r11, r5 |
| + /* r7 = 2-R[j]*d */ |
| + sub r7, r10, r7<<2 |
| + /* r11 = R[j]*(2-R[j]*d) */ |
| + mulu.d r6, r11, r7 |
| + lsl r11, r7, 2 |
| + |
| + |
| + /* Multiply with dividend to get quotient, r7 = sf(op1)/sf(op2) */ |
| + mulu.d r6, r11, r12 |
| + |
| + /* Shift by 3 to get result in 1.31 format, as required by the exponent. */ |
| + /* Note that 1.31 format is already used by the exponent in r9, since */ |
| + /* a bias of 127 was added to the result exponent, even though the implicit */ |
| + /* bit was inserted. This gives the exponent an additional bias of 1, which */ |
| + /* supports 1.31 format. */ |
| + //lsl r10, r7, 3 |
| + |
| + /* Adjust exponent and mantissa in case the result is of format |
| + 0000.1xxx to 0001.xxx*/ |
| +#if defined(L_avr32_f32_div) |
| + lsr r12, 4 /* Scale dividend to 6.26 format to match the |
| + result of the multiplication of the divisor and |
| + quotient to get the remainder. */ |
| +#endif |
| + bld r7, 31-3 |
| + breq 0f |
| + lsl r7, 1 |
| + sub r9, 1 |
| +#if defined(L_avr32_f32_div) |
| + lsl r12, 1 /* Scale dividend to 5.27 format to match the |
| + result of the multiplication of the divisor and |
| + quotient to get the remainder. */ |
| +#endif |
| +0: |
| + cp r9, 0 |
| + brle __avr32_f32_div_res_subnormal /* Result was subnormal. */ |
| + |
| + |
| +#if defined(L_avr32_f32_div) |
| + /* In order to round correctly we calculate the remainder: |
| + Remainder = dividend[r12] - divisor[r5]*quotient[r7] |
| + for the case when the quotient is halfway between the round-up |
| + value and the round-down value. If the remainder then is negative |
| + it means that the quotient was too big and that it should not be |
| + rounded up; if the remainder is positive the quotient was too small |
| + and we need to round up. If the remainder is zero it means that the |
| + quotient is exact, but since we need to remove the guard bit we should |
| + round to even. */ |
| + andl r7, 0xffe0 |
| + orl r7, 0x0010 |
| + |
| + /* Now do the multiplication. The quotient has the format 4.28 |
| + while the divisor has the format 2.30 which gives a result |
| + of 6.26 */ |
| + mulu.d r10, r5, r7 |
| + |
| + /* Check if remainder is positive, negative or equal. */ |
| + bfextu r5, r7, 5, 1 /* Get parity bit into bit 0 of r5 */ |
| + cp r10, 0 |
| +__avr32_f32_div_round_subnormal: |
| + cpc r11, r12 |
| + srlo r11 /* Remainder positive: we need to round up.*/ |
| + moveq r11, r5 /* Remainder zero: round up if mantissa odd. */ |
| +#else |
| + bfextu r11, r7, 4, 1 /* Get guard bit */ |
| +#endif |
| + |
| + /* Pack final result*/ |
| + lsr r12, r7, 5 |
| + bfins r12, r9, 23, 8 |
| + /* For UC3, load with postincrement is faster than ldm */ |
| + ld.d r6, sp++ |
| + ld.w r5, sp++ |
| + bld r8, 31 |
| + bst r12, 31 |
| + /* Rounding add. */ |
| + add r12, r11 |
| + ret r12 |
| + |
| +__divsf_return_op1: |
| + lsl r8, 1 |
| + ror r12 |
| + ret r12 |
| + |
| + |
| +2: |
| + /* Op1 is NaN or inf */ |
| + retne -1 /* Return NaN if op1 is NaN */ |
| + /* Op1 is inf check op2 */ |
| + mov_imm r9, 0xff000000 |
| + cp r11, r9 |
| + brlo __divsf_return_op1 /* inf/number gives inf */ |
| + ret -1 /* The rest gives NaN*/ |
| +3: |
| + /* Op2 is NaN or inf */ |
| + reteq 0 /* Return zero if number/inf*/ |
| + ret -1 /* Return NaN*/ |
| +4: |
| + /* Op2 is zero ? */ |
| + tst r12,r12 |
| + reteq -1 /* 0.0/0.0 is NaN */ |
| + /* Nonzero/0.0 is Inf. Sign bit will be shifted in before returning*/ |
| + mov_imm r12, 0xff000000 |
| + rjmp __divsf_return_op1 |
| + |
| +11: /* Op1 was denormal. Fix it. */ |
| + lsl r12,7 |
| + clz r9,r12 |
| + lsl r12,r12,r9 |
| + rsub r9,r9,1 |
| + rjmp 12b |
| + |
| +13: /* Op2 was denormal. Fix it. */ |
| + lsl r11,7 |
| + clz r10,r11 |
| + lsl r11,r11,r10 |
| + rsub r10,r10,1 |
| + rjmp 14b |
| + |
| + |
| +__avr32_f32_div_res_subnormal: /* Divide result was subnormal */ |
| +#if defined(L_avr32_f32_div) |
| + /* Check how much we must scale down the mantissa. */ |
| + neg r9 |
| + sub r9, -1 /* We no longer have an implicit bit. */ |
| + satu r9 >> 0, 5 /* Saturate shift amount to max 31. */ |
| + /* Scale down quotient */ |
| + rsub r10, r9, 32 |
| + lsr r7, r7, r9 |
| + /* Scale down the dividend to match the scaling of the quotient. */ |
| + lsl r6, r12, r10 /* Make the dividend 64-bit and put the lsw in r6 */ |
| + lsr r12, r12, r9 |
| + |
| + /* Start performing the same rounding as done for normal numbers |
| + but this time we have scaled the quotient and dividend and hence |
| + need a slightly different comparison. */ |
| + andl r7, 0xffe0 |
| + orl r7, 0x0010 |
| + |
| + /* Now do the multiplication. The quotient has the format 4.28 |
| + while the divisor has the format 2.30 which gives a result |
| + of 6.26 */ |
| + mulu.d r10, r5, r7 |
| + |
| + /* Set exponent to 0 */ |
| + mov r9, 0 |
| + |
| + /* Check if remainder is positive, negative or equal. */ |
| + bfextu r5, r7, 5, 1 /* Get parity bit into bit 0 of r5 */ |
| + cp r10, r6 |
| + rjmp __avr32_f32_div_round_subnormal |
| + |
| +#else |
| + ld.d r6, sp++ |
| + ld.w r5, sp++ |
| + /*Flush to zero*/ |
| + ret 0 |
| +#endif |
| +#endif |
| + |
| +#ifdef L_avr32_f32_mul |
| + .global __avr32_f32_mul |
| + .type __avr32_f32_mul,@function |
| + |
| + |
| +__avr32_f32_mul: |
| + mov r8, r12 |
| + eor r12, r11 /* MSB(r12) = Sign(op1) ^ Sign(op2) */ |
| + andh r12, 0x8000, COH |
| + |
| + /* arrange operands so that op1 >= op2 */ |
| + cbr r8, 31 |
| + breq __avr32_f32_mul_op1_zero |
| + cbr r11, 31 |
| + |
| + /* Put the number with the largest exponent in r10 |
| + and the number with the smallest exponent in r9 */ |
| + max r10, r8, r11 |
| + min r9, r8, r11 |
| + |
| + /* Unpack exponent and mantissa of op1 */ |
| + lsl r8, r10, 8 |
| + sbr r8, 31 /* Set implicit bit. */ |
| + lsr r10, 23 |
| + |
| + /* op1 is NaN or Inf. */ |
| + cp.w r10, 0xff |
| + breq __avr32_f32_mul_op1_nan_or_inf |
| + |
| + /* Unpack exponent and mantissa of op2 */ |
| + lsl r11, r9, 8 |
| + sbr r11, 31 /* Set implicit bit. */ |
| + lsr r9, 23 |
| + |
| + /* op2 is either zero or subnormal. */ |
| + breq __avr32_f32_mul_op2_subnormal |
| +0: |
| + /* Calculate new exponent */ |
| + add r9,r10 |
| + |
| + /* Do the multiplication */ |
| + mulu.d r10,r8,r11 |
| + |
| + /* We might need to scale up by two if the MSB of the result is |
| + zero. */ |
| + lsl r8, r11, 1 |
| + movcc r11, r8 |
| + subcc r9, 1 |
| + |
| + /* Put the shifted out bits of the mantissa into r10 */ |
| + lsr r10, 8 |
| + bfins r10, r11, 24, 8 |
| + |
| + sub r9,(127-1) /* remove extra exponent bias */ |
| + brle __avr32_f32_mul_res_subnormal |
| + |
| + /* Check for Inf. */ |
| + cp.w r9, 0xff |
| + brge 1f |
| + |
| + /* Pack result. */ |
| + or r12, r12, r11 >> 8 |
| + bfins r12, r9, 23, 8 |
| + |
| + /* Round */ |
| +__avr32_f32_mul_round: |
| + mov_imm r8, 0x80000000 |
| + bld r12, 0 |
| + subne r8, -1 |
| + |
| + cp.w r10, r8 |
| + subhs r12, -1 |
| + |
| + ret r12 |
| + |
| +1: |
| + /* Return Inf */ |
| + orh r12, 0x7f80 |
| + ret r12 |
| + |
| +__avr32_f32_mul_op2_subnormal: |
| + cbr r11, 31 |
| + clz r9, r11 |
| + retcs 0 /* op2 is zero. Return 0 */ |
| + lsl r11, r11, r9 |
| + rsub r9, r9, 1 |
| + |
| + /* Check if op1 is also subnormal. */ |
| + tst r10, r10 |
| + brne 0b |
| + |
| + /* op1 is also subnormal */ |
| + cbr r8, 31 |
| + clz r10, r8 |
| + retcs 0 /* op1 is zero. Return 0 */ |
| + lsl r8, r8, r10 |
| + rsub r10, r10, 1 |
| + |
| + rjmp 0b |
| + |
| + |
| +__avr32_f32_mul_op1_nan_or_inf: |
| + /* Check if op1 is NaN, if so return NaN */ |
| + lsl r11, r8, 1 |
| + retne -1 |
| + |
| + /* op1 is Inf. */ |
| + tst r9, r9 |
| + reteq -1 /* Inf * 0 -> NaN */ |
| + |
| + bfins r12, r10, 23, 8 /* Generate Inf in r12 */ |
| + |
| + /* Check if op2 is Inf. or NaN */ |
| + lsr r11, r9, 23 |
| + cp.w r11, 0xff |
| + retne r12 /* op2 not Inf or NaN, return Inf */ |
| + |
| + lsl r9, 9 |
| + reteq r12 /* op2 Inf return Inf */ |
| + ret -1 /* op2 is NaN, return NaN */ |
| + |
| +__avr32_f32_mul_res_subnormal: |
| + /* Check if the number is so small that |
| + it will be represented as zero. */ |
| + rsub r9, r9, 9 |
| + rsub r8, r9, 32 |
| + retcs 0 |
| + |
| + /* Shift the mantissa into the correct position.*/ |
| + lsr r9, r11, r9 |
| + /* Add sign bit. */ |
| + or r12, r9 |
| + /* Put the shifted out bits in the most significant part |
| + of r8. */ |
| + lsl r11, r11, r8 |
| + |
| + /* Add all the remainder bits used for rounding into r10 */ |
| + andh r10, 0x00FF |
| + or r10, r11 |
| + rjmp __avr32_f32_mul_round |
| + |
| +__avr32_f32_mul_op1_zero: |
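| + /* op1 is zero: return a signed zero (r12 holds only the sign bit), |
| + unless op2 is Inf or NaN, in which case the result is NaN. */ |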
| + bfextu r10, r11, 23, 8 |
| + cp.w r10, 0xff |
| + retne r12 |
| + reteq -1 |
| + |
| +#endif |
| + |
| + |
| +#ifdef L_avr32_s32_to_f32 |
| + .global __avr32_s32_to_f32 |
| + .type __avr32_s32_to_f32,@function |
| +__avr32_s32_to_f32: |
| + cp r12, 0 |
| + reteq r12 /* If zero then return zero float */ |
| + mov r11, r12 /* Keep the sign */ |
| + abs r12 /* Compute the absolute value */ |
| + mov r10, 31 + 127 /* Set the correct exponent */ |
| + |
| + /* Normalize */ |
| + normalize_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/ |
| + |
| + /* Check for subnormal result */ |
| + cp.w r10, 0 |
| + brle __avr32_s32_to_f32_subnormal |
| + |
| + round_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/ |
| + pack_sf r12 /*sf*/, r10 /*exp*/, r12 /*mant*/ |
| + lsl r11, 1 |
| + ror r12 |
| + ret r12 |
| + |
| +__avr32_s32_to_f32_subnormal: |
| + /* Adjust a subnormal result */ |
| + adjust_subnormal_sf r12/*sf*/, r10 /*exp*/, r12 /*mant*/, r11/*sign*/, r9 /*scratch*/ |
| + ret r12 |
| + |
| +#endif |
| + |
| +#ifdef L_avr32_u32_to_f32 |
| + .global __avr32_u32_to_f32 |
| + .type __avr32_u32_to_f32,@function |
| +__avr32_u32_to_f32: |
| + cp r12, 0 |
| + reteq r12 /* If zero then return zero float */ |
| + mov r10, 31 + 127 /* Set the correct exponent */ |
| + |
| + /* Normalize */ |
| + normalize_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/ |
| + |
| + /* Check for subnormal result */ |
| + cp.w r10, 0 |
| + brle __avr32_u32_to_f32_subnormal |
| + |
| + round_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/ |
| + pack_sf r12 /*sf*/, r10 /*exp*/, r12 /*mant*/ |
| + lsr r12,1 /* Sign bit is 0 for unsigned int */ |
| + ret r12 |
| + |
| +__avr32_u32_to_f32_subnormal: |
| + /* Adjust a subnormal result */ |
| + mov r8, 0 |
| + adjust_subnormal_sf r12/*sf*/,r10 /*exp*/, r12 /*mant*/,r8/*sign*/, r9 /*scratch*/ |
| + ret r12 |
| + |
| + |
| +#endif |
| + |
| + |
| +#ifdef L_avr32_f32_to_s32 |
| + .global __avr32_f32_to_s32 |
| + .type __avr32_f32_to_s32,@function |
| +__avr32_f32_to_s32: |
| + bfextu r11, r12, 23, 8 |
| + sub r11,127 /* Fix bias */ |
| + retlo 0 /* Negative exponent yields zero integer */ |
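| + /* Example: for 1.0f the biased exponent is 127, so the shift amount |
| + below becomes 31 and only the implicit bit survives, giving 1. */ |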
| + |
| + /* Shift mantissa into correct position */ |
| + rsub r11,r11,31 /* Shift amount */ |
| + lsl r10,r12,8 /* Get mantissa */ |
| + sbr r10,31 /* Add implicit bit */ |
| + lsr r10,r10,r11 /* Perform shift */ |
| + lsl r12,1 /* Check sign */ |
| + retcc r10 /* if positive, we are done */ |
| + neg r10 /* if negative float, negate result */ |
| + ret r10 |
| + |
| +#endif |
| + |
| +#ifdef L_avr32_f32_to_u32 |
| + .global __avr32_f32_to_u32 |
| + .type __avr32_f32_to_u32,@function |
| +__avr32_f32_to_u32: |
| + cp r12,0 |
| + retmi 0 /* Negative numbers give 0 */ |
| + bfextu r11, r12, 23, 8 /* Extract exponent */ |
| + sub r11,127 /* Fix bias */ |
| + retlo 0 /* Negative exponent yields zero integer */ |
| + |
| + /* Shift mantissa into correct position */ |
| + rsub r11,r11,31 /* Shift amount */ |
| + lsl r12,8 /* Get mantissa */ |
| + sbr r12,31 /* Add implicit bit */ |
| + lsr r12,r12,r11 /* Perform shift */ |
| + ret r12 |
| + |
| +#endif |
| + |
| +#ifdef L_avr32_f32_to_f64 |
| + .global __avr32_f32_to_f64 |
| + .type __avr32_f32_to_f64,@function |
| + |
| +__avr32_f32_to_f64: |
| + lsl r11,r12,1 /* Remove sign bit, keep original value in r12*/ |
| + moveq r10, 0 |
| + reteq r11 /* Return zero if input is zero */ |
| + |
| + bfextu r9,r11,24,8 /* Get exponent */ |
| + cp.w r9,0xff /* check for NaN or inf */ |
| + breq 0f |
| + |
| + lsl r11,7 /* Convert sf mantissa to df format */ |
| + mov r10,0 |
| + |
| + /* Check if implicit bit should be set */ |
| + cp.w r9, 0 |
| + subeq r9,-1 /* Adjust exponent if it was 0 */ |
| + srne r8 |
| + or r11, r11, r8 << 31 /* Set implicit bit if needed */ |
| + sub r9,(127-0x3ff) /* Convert exponent to df format exponent */ |
| + |
| + /*We know that low register of mantissa is 0, and will be unaffected by normalization.*/ |
| + /*We can therefore use the faster normalize_sf function instead of normalize_df.*/ |
| + normalize_sf r9 /*exp*/, r11 /*mantissa*/, r8 /*scratch*/ |
| + pack_df r9 /*exp*/, r10, r11 /*mantissa*/, r10, r11 /*df*/ |
| + |
| +__extendsfdf_return_op1: |
| + /* Rotate in sign bit */ |
| + lsl r12, 1 |
| + ror r11 |
| + ret r11 |
| + |
| +0: |
| + /* Inf or NaN*/ |
| + mov_imm r10, 0xffe00000 |
| + lsl r11,8 /* check mantissa */ |
| + movne r11, -1 /* Return NaN */ |
| + moveq r11, r10 /* Return inf */ |
| + rjmp __extendsfdf_return_op1 |
| +#endif |
| + |
| + |
| +#ifdef L_avr32_f64_to_f32 |
| + .global __avr32_f64_to_f32 |
| + .type __avr32_f64_to_f32,@function |
| + |
| +__avr32_f64_to_f32: |
| + /* Unpack */ |
| + lsl r9,r11,1 /* Unpack exponent */ |
| + lsr r9,21 |
| + |
| + reteq 0 /* If exponent is 0 the number is so small |
| + that the conversion to single float gives |
| + zero */ |
| + |
| + lsl r8,r11,10 /* Adjust mantissa */ |
| + or r12,r8,r10>>22 |
| + |
| + lsl r10,10 /* Check if there are any remaining bits |
| + in the low part of the mantissa.*/ |
| + neg r10 |
| + rol r12 /* If there were remaining bits then set lsb |
| + of mantissa to 1 */ |
| + |
| + cp r9,0x7ff |
| + breq 2f /* Check for NaN or inf */ |
| + |
| + sub r9,(0x3ff-127) /* Adjust bias of exponent */ |
| + sbr r12,31 /* set the implicit bit.*/ |
| + |
| + cp.w r9, 0 /* Check for subnormal number */ |
| + brle 3f |
| + |
| + round_sf r9 /*exp*/, r12 /*mant*/, r10 /*scratch*/ |
| + pack_sf r12 /*sf*/, r9 /*exp*/, r12 /*mant*/ |
| +__truncdfsf_return_op1: |
| + /* Rotate in sign bit */ |
| + lsl r11, 1 |
| + ror r12 |
| + ret r12 |
| + |
| +2: |
| + /* NaN or inf */ |
| + cbr r12,31 /* clear implicit bit */ |
| + retne -1 /* Return NaN if mantissa not zero */ |
| + mov_imm r12, 0xff000000 |
| + ret r12 /* Return inf */ |
| + |
| +3: /* Result is subnormal. Adjust it.*/ |
| + adjust_subnormal_sf r12/*sf*/,r9 /*exp*/, r12 /*mant*/, r11/*sign*/, r10 /*scratch*/ |
| + ret r12 |
| + |
| + |
| +#endif |
| + |
| +#if defined(L_mulsi3) && (__AVR32_UC__ == 3) |
| + .global __mulsi3 |
| + .type __mulsi3,@function |
| + |
| +__mulsi3: |
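| + /* Software 32x32->32 multiply by shift-and-add: examine r11 one bit at |
| + a time (lsb first), adding the correspondingly shifted r12 into the |
| + accumulator r9, and stop as soon as no bits remain in r11. */ |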
| + mov r9, 0 |
| +0: |
| + lsr r11, 1 |
| + addcs r9, r9, r12 |
| + breq 1f |
| + lsl r12, 1 |
| + rjmp 0b |
| +1: |
| + ret r9 |
| +#endif |
| --- /dev/null |
| +++ b/gcc/config/avr32/lib2funcs.S |
| @@ -0,0 +1,21 @@ |
| + .align 4 |
| + .global __nonlocal_goto |
| + .type __nonlocal_goto,@function |
| + |
| +/* __nonlocal_goto: This function handles nonlocal_goto's in gcc. |
| + |
| + parameter 0 (r12) = New Frame Pointer |
| + parameter 1 (r11) = Address to goto |
| + parameter 2 (r10) = New Stack Pointer |
| + |
| + This function invalidates the return stack, since it returns from a |
| + function without using a return instruction. |
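| + |
| + Note: the new frame pointer (r12) is installed in r7, which this port |
| + uses as the frame pointer register, before the stack pointer is |
| + switched and control transfers to the destination address in r11. |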
| +*/ |
| +__nonlocal_goto: |
| + mov r7, r12 |
| + mov sp, r10 |
| + frs # Flush return stack |
| + mov pc, r11 |
| + |
| + |
| + |
| --- /dev/null |
| +++ b/gcc/config/avr32/linux-elf.h |
| @@ -0,0 +1,151 @@ |
| +/* |
| + Linux/Elf specific definitions. |
| + Copyright 2003-2006 Atmel Corporation. |
| + |
| + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| + and Håvard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com> |
| + |
| + This file is part of GCC. |
| + |
| + This program is free software; you can redistribute it and/or modify |
| + it under the terms of the GNU General Public License as published by |
| + the Free Software Foundation; either version 2 of the License, or |
| + (at your option) any later version. |
| + |
| + This program is distributed in the hope that it will be useful, |
| + but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + GNU General Public License for more details. |
| + |
| + You should have received a copy of the GNU General Public License |
| + along with this program; if not, write to the Free Software |
| + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ |
| + |
| + |
| + |
| +/* elfos.h should have already been included. Now just override |
| + any conflicting definitions and add any extras. */ |
| + |
| +/* Run-time Target Specification. */ |
| +#undef TARGET_VERSION |
| +#define TARGET_VERSION fputs (" (AVR32 GNU/Linux with ELF)", stderr); |
| + |
| +/* Do not assume anything about header files. */ |
| +#define NO_IMPLICIT_EXTERN_C |
| + |
| +/* The GNU C++ standard library requires that these macros be defined. */ |
| +#undef CPLUSPLUS_CPP_SPEC |
| +#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)" |
| + |
| +/* Now we define the strings used to build the spec file. */ |
| +#undef LIB_SPEC |
| +#define LIB_SPEC \ |
| + "%{pthread:-lpthread} \ |
| + %{shared:-lc} \ |
| + %{!shared:%{profile:-lc_p}%{!profile:-lc}}" |
| + |
| +/* Provide a STARTFILE_SPEC appropriate for GNU/Linux. Here we add |
| + the GNU/Linux magical crtbegin.o file (see crtstuff.c) which |
| + provides part of the support for getting C++ file-scope static |
| + object constructed before entering `main'. */ |
| + |
| +#undef STARTFILE_SPEC |
| +#define STARTFILE_SPEC \ |
| + "%{!shared: \ |
| + %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} \ |
| + %{!p:%{profile:gcrt1.o%s} \ |
| + %{!profile:crt1.o%s}}}} \ |
| + crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}" |
| + |
| +/* Provide an ENDFILE_SPEC appropriate for GNU/Linux. Here we tack on |
| + the GNU/Linux magical crtend.o file (see crtstuff.c) which |
| + provides part of the support for getting C++ file-scope static |
| + object constructed before entering `main', followed by a normal |
| + GNU/Linux "finalizer" file, `crtn.o'. */ |
| + |
| +#undef ENDFILE_SPEC |
| +#define ENDFILE_SPEC \ |
| + "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s" |
| + |
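| +/* Reading of ASM_SPEC below: pass --pic to the assembler unless -mno-pic |
| + or -fno-pic is given; request linker relaxation (--linkrelax) when |
| + -mrelax or an optimization level above -O1 is used, unless -mno-relax; |
| + and forward any -mcpu= option. */ |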
| +#undef ASM_SPEC |
| +#define ASM_SPEC "%{!mno-pic:%{!fno-pic:--pic}} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{mcpu=*:-mcpu=%*}" |
| + |
| +#undef LINK_SPEC |
| +#define LINK_SPEC "%{version:-v} \ |
| + %{static:-Bstatic} \ |
| + %{shared:-shared} \ |
| + %{symbolic:-Bsymbolic} \ |
| + %{rdynamic:-export-dynamic} \ |
| + %{!dynamic-linker:-dynamic-linker /lib/ld-uClibc.so.0} \ |
| + %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}}" |
| + |
| +#define TARGET_OS_CPP_BUILTINS() LINUX_TARGET_OS_CPP_BUILTINS() |
| + |
| +/* This is how we tell the assembler that two symbols have the same value. */ |
| +#define ASM_OUTPUT_DEF(FILE, NAME1, NAME2) \ |
| + do \ |
| + { \ |
| + assemble_name (FILE, NAME1); \ |
| + fputs (" = ", FILE); \ |
| + assemble_name (FILE, NAME2); \ |
| + fputc ('\n', FILE); \ |
| + } \ |
| + while (0) |
| + |
| + |
| + |
| +#undef CC1_SPEC |
| +#define CC1_SPEC "%{profile:-p}" |
| + |
| +/* Target CPU builtins. */ |
| +#define TARGET_CPU_CPP_BUILTINS() \ |
| + do \ |
| + { \ |
| + builtin_define ("__avr32__"); \ |
| + builtin_define ("__AVR32__"); \ |
| + builtin_define ("__AVR32_LINUX__"); \ |
| + builtin_define (avr32_part->macro); \ |
| + builtin_define (avr32_arch->macro); \ |
| + if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A) \ |
| + builtin_define ("__AVR32_AVR32A__"); \ |
| + else \ |
| + builtin_define ("__AVR32_AVR32B__"); \ |
| + if (TARGET_UNALIGNED_WORD) \ |
| + builtin_define ("__AVR32_HAS_UNALIGNED_WORD__"); \ |
| + if (TARGET_SIMD) \ |
| + builtin_define ("__AVR32_HAS_SIMD__"); \ |
| + if (TARGET_DSP) \ |
| + builtin_define ("__AVR32_HAS_DSP__"); \ |
| + if (TARGET_RMW) \ |
| + builtin_define ("__AVR32_HAS_RMW__"); \ |
| + if (TARGET_BRANCH_PRED) \ |
| + builtin_define ("__AVR32_HAS_BRANCH_PRED__"); \ |
| + if (TARGET_FAST_FLOAT) \ |
| + builtin_define ("__AVR32_FAST_FLOAT__"); \ |
| + } \ |
| + while (0) |
| + |
| + |
| + |
| +/* Call the function profiler with a given profile label. */ |
| +#undef FUNCTION_PROFILER |
| +#define FUNCTION_PROFILER(STREAM, LABELNO) \ |
| + do \ |
| + { \ |
| + fprintf (STREAM, "\tmov\tlr, lo(mcount)\n\torh\tlr, hi(mcount)\n"); \ |
| + fprintf (STREAM, "\ticall lr\n"); \ |
| + } \ |
| + while (0) |
| + |
| +#define NO_PROFILE_COUNTERS 1 |
| + |
| +/* For dynamic libraries to work */ |
| +/* #define PLT_REG_CALL_CLOBBERED 1 */ |
| +#define AVR32_ALWAYS_PIC 1 |
| + |
| +/* uclibc does not implement sinf, cosf etc. */ |
| +#undef TARGET_C99_FUNCTIONS |
| +#define TARGET_C99_FUNCTIONS 0 |
| + |
| +#define LINK_GCC_C_SEQUENCE_SPEC \ |
| + "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}" |
| --- /dev/null |
| +++ b/gcc/config/avr32/predicates.md |
| @@ -0,0 +1,419 @@ |
| +;; AVR32 predicates file. |
| +;; Copyright 2003-2006 Atmel Corporation. |
| +;; |
| +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| +;; |
| +;; This file is part of GCC. |
| +;; |
| +;; This program is free software; you can redistribute it and/or modify |
| +;; it under the terms of the GNU General Public License as published by |
| +;; the Free Software Foundation; either version 2 of the License, or |
| +;; (at your option) any later version. |
| +;; |
| +;; This program is distributed in the hope that it will be useful, |
| +;; but WITHOUT ANY WARRANTY; without even the implied warranty of |
| +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| +;; GNU General Public License for more details. |
| +;; |
| +;; You should have received a copy of the GNU General Public License |
| +;; along with this program; if not, write to the Free Software |
| +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
| + |
| + |
| +;; True if the operand is a memory reference which contains an |
| +;; address consisting of a single pointer register |
| +(define_predicate "avr32_indirect_register_operand" |
| + (and (match_code "mem") |
| + (match_test "register_operand(XEXP(op, 0), SImode)"))) |
| + |
| + |
| + |
| +;; Address expression with a base pointer offset by |
| +;; a register displacement |
| +(define_predicate "avr32_indexed_memory_operand" |
| + (and (match_code "mem") |
| + (match_test "GET_CODE(XEXP(op, 0)) == PLUS")) |
| + { |
| + |
| + rtx op0 = XEXP(XEXP(op, 0), 0); |
| + rtx op1 = XEXP(XEXP(op, 0), 1); |
| + |
| + return ((avr32_address_register_rtx_p (op0, 0) |
| + && avr32_legitimate_index_p (GET_MODE(op), op1, 0)) |
| + || (avr32_address_register_rtx_p (op1, 0) |
| + && avr32_legitimate_index_p (GET_MODE(op), op0, 0))); |
| + |
| + }) |
| + |
| +;; Operand suitable for the ld.sb instruction |
| +(define_predicate "load_sb_memory_operand" |
| + (ior (match_operand 0 "avr32_indirect_register_operand") |
| + (match_operand 0 "avr32_indexed_memory_operand"))) |
| + |
| + |
| +;; Operand suitable as operand to insns sign extending QI values |
| +(define_predicate "extendqi_operand" |
| + (ior (match_operand 0 "load_sb_memory_operand") |
| + (match_operand 0 "register_operand"))) |
| + |
| +(define_predicate "post_inc_memory_operand" |
| + (and (match_code "mem") |
| + (match_test "(GET_CODE(XEXP(op, 0)) == POST_INC) |
| + && REG_P(XEXP(XEXP(op, 0), 0))"))) |
| + |
| +(define_predicate "pre_dec_memory_operand" |
| + (and (match_code "mem") |
| + (match_test "(GET_CODE(XEXP(op, 0)) == PRE_DEC) |
| + && REG_P(XEXP(XEXP(op, 0), 0))"))) |
| + |
| +;; Operand suitable for add instructions |
| +(define_predicate "avr32_add_operand" |
| + (ior (match_operand 0 "register_operand") |
| + (and (match_operand 0 "immediate_operand") |
| + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'I', \"Is21\")")))) |
| + |
| +;; Operand is a power of two immediate |
| +(define_predicate "power_of_two_operand" |
| + (match_code "const_int") |
| +{ |
| + HOST_WIDE_INT value = INTVAL (op); |
| + |
| + return value != 0 && (value & (value - 1)) == 0; |
| +}) |
| + |
| +;; Operand is a multiple of 8 immediate |
| +(define_predicate "multiple_of_8_operand" |
| + (match_code "const_int") |
| +{ |
| + HOST_WIDE_INT value = INTVAL (op); |
| + |
| + return (value & 0x7) == 0 ; |
| +}) |
| + |
| +;; Operand is a multiple of 16 immediate |
| +(define_predicate "multiple_of_16_operand" |
| + (match_code "const_int") |
| +{ |
| + HOST_WIDE_INT value = INTVAL (op); |
| + |
| + return (value & 0xf) == 0 ; |
| +}) |
| + |
| +;; Operand is a mask used for masking away upper bits of a reg |
| +(define_predicate "avr32_mask_upper_bits_operand" |
| + (match_code "const_int") |
| +{ |
| + HOST_WIDE_INT value = INTVAL (op) + 1; |
| + |
| + return value != 1 && value != 0 && (value & (value - 1)) == 0; |
| +}) |
| + |
| + |
| +;; Operand suitable for mul instructions |
| +(define_predicate "avr32_mul_operand" |
| + (ior (match_operand 0 "register_operand") |
| + (and (match_operand 0 "immediate_operand") |
| + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")")))) |
| + |
| +;; True for logical binary operators. |
| +(define_predicate "logical_binary_operator" |
| + (match_code "ior,xor,and")) |
| + |
| +;; True for logical shift operators |
| +(define_predicate "logical_shift_operator" |
| + (match_code "ashift,lshiftrt")) |
| + |
| +;; True for shift operand for logical and, or and eor insns |
| +(define_predicate "avr32_logical_shift_operand" |
| + (and (match_code "ashift,lshiftrt") |
| + (ior (and (match_test "GET_CODE(XEXP(op, 1)) == CONST_INT") |
| + (match_test "register_operand(XEXP(op, 0), GET_MODE(XEXP(op, 0)))")) |
| + (and (match_test "GET_CODE(XEXP(op, 0)) == CONST_INT") |
| + (match_test "register_operand(XEXP(op, 1), GET_MODE(XEXP(op, 1)))")))) |
| + ) |
| + |
| + |
| +;; Predicate for second operand to and, ior and xor insn patterns |
| +(define_predicate "avr32_logical_insn_operand" |
| + (ior (match_operand 0 "register_operand") |
| + (match_operand 0 "avr32_logical_shift_operand")) |
| +) |
| + |
| + |
| +;; True for avr32 comparison operators |
| +(define_predicate "avr32_comparison_operator" |
| + (ior (match_code "eq, ne, gt, ge, lt, le, gtu, geu, ltu, leu") |
| + (and (match_code "unspec") |
| + (match_test "(XINT(op, 1) == UNSPEC_COND_MI) |
| + || (XINT(op, 1) == UNSPEC_COND_PL)")))) |
| + |
| +(define_predicate "avr32_cond3_comparison_operator" |
| + (ior (match_code "eq, ne, ge, lt, geu, ltu") |
| + (and (match_code "unspec") |
| + (match_test "(XINT(op, 1) == UNSPEC_COND_MI) |
| + || (XINT(op, 1) == UNSPEC_COND_PL)")))) |
| + |
| +;; True for avr32 comparison operand |
| +(define_predicate "avr32_comparison_operand" |
| + (ior (and (match_code "eq, ne, gt, ge, lt, le, gtu, geu, ltu, leu") |
| + (match_test "(CC0_P (XEXP(op,0)) && rtx_equal_p (XEXP(op,1), const0_rtx))")) |
| + (and (match_code "unspec") |
| + (match_test "(XINT(op, 1) == UNSPEC_COND_MI) |
| + || (XINT(op, 1) == UNSPEC_COND_PL)")))) |
| + |
| +;; True if this is a const_int with one bit set |
| +(define_predicate "one_bit_set_operand" |
| + (match_code "const_int") |
| + { |
| + int i; |
| + int value; |
| + int ones = 0; |
| + |
| + value = INTVAL(op); |
| + for ( i = 0 ; i < 32; i++ ){ |
| + if ( value & ( 1 << i ) ){ |
| + ones++; |
| + } |
| + } |
| + |
| + return ( ones == 1 ); |
| + }) |
| + |
| + |
| +;; True if this is a const_int with one bit cleared |
| +(define_predicate "one_bit_cleared_operand" |
| + (match_code "const_int") |
| + { |
| + int i; |
| + int value; |
| + int zeroes = 0; |
| + |
| + value = INTVAL(op); |
| + for ( i = 0 ; i < 32; i++ ){ |
| + if ( !(value & ( 1 << i )) ){ |
| + zeroes++; |
| + } |
| + } |
| + |
| + return ( zeroes == 1 ); |
| + }) |
| + |
| + |
| +;; Immediate with all the low 16 bits cleared |
| +(define_predicate "avr32_hi16_immediate_operand" |
| + (match_code "const_int") |
| + { |
| + /* If the low 16-bits are zero then this |
| + is a hi16 immediate. */ |
| + return ((INTVAL(op) & 0xffff) == 0); |
| + } |
| +) |
| + |
| +;; True if this is a register or immediate operand |
| +(define_predicate "register_immediate_operand" |
| + (ior (match_operand 0 "register_operand") |
| + (match_operand 0 "immediate_operand"))) |
| + |
| +;; True if this is a register or const_int operand |
| +(define_predicate "register_const_int_operand" |
| + (ior (match_operand 0 "register_operand") |
| + (and (match_operand 0 "const_int_operand") |
| + (match_operand 0 "immediate_operand")))) |
| + |
| +;; True if this is a register or const_double operand |
| +(define_predicate "register_const_double_operand" |
| + (ior (match_operand 0 "register_operand") |
| + (match_operand 0 "const_double_operand"))) |
| + |
| +;; True if this is an operand containing a label_ref |
| +(define_predicate "avr32_label_ref_operand" |
| + (and (match_code "mem") |
| + (match_test "avr32_find_symbol(op) |
| + && (GET_CODE(avr32_find_symbol(op)) == LABEL_REF)"))) |
| + |
| +;; True if this is a valid symbol pointing to the constant pool |
| +(define_predicate "avr32_const_pool_operand" |
| + (and (match_code "symbol_ref") |
| + (match_test "CONSTANT_POOL_ADDRESS_P(op)")) |
| + { |
| + return (flag_pic ? (!(symbol_mentioned_p (get_pool_constant (op)) |
| + || label_mentioned_p (get_pool_constant (op))) |
| + || avr32_got_mentioned_p(get_pool_constant (op))) |
| + : true); |
| + } |
| +) |
| + |
| +;; True if this is a memory reference to the constant or mini pool |
| +(define_predicate "avr32_const_pool_ref_operand" |
| + (ior (match_operand 0 "avr32_label_ref_operand") |
| + (and (match_code "mem") |
| + (match_test "avr32_const_pool_operand(XEXP(op,0), GET_MODE(XEXP(op,0)))")))) |
| + |
| + |
| +;; Legal source operand for movti insns |
| +(define_predicate "avr32_movti_src_operand" |
| + (ior (match_operand 0 "avr32_const_pool_ref_operand") |
| + (ior (ior (match_operand 0 "register_immediate_operand") |
| + (match_operand 0 "avr32_indirect_register_operand")) |
| + (match_operand 0 "post_inc_memory_operand")))) |
| + |
| +;; Legal destination operand for movti insns |
| +(define_predicate "avr32_movti_dst_operand" |
| + (ior (ior (match_operand 0 "register_operand") |
| + (match_operand 0 "avr32_indirect_register_operand")) |
| + (match_operand 0 "pre_dec_memory_operand"))) |
| + |
| + |
| +;; True if this is a memory operand with a k12 offset |
| +(define_predicate "avr32_k12_memory_operand" |
| + (and (match_code "mem") |
| + (ior (match_test "REG_P(XEXP(op, 0))") |
| + (match_test "GET_CODE(XEXP(op, 0)) == PLUS |
| + && REG_P(XEXP(XEXP(op, 0), 0)) |
| + && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT) |
| +                        && (CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(XEXP(op, 0), 1)), |
| + 'K', (mode == SImode) ? \"Ks14\" : ((mode == HImode) ? \"Ks13\" : \"Ks12\")))")))) |
| + |
| +;; True if this is a memory operand with an immediate displacement |
| +(define_predicate "avr32_imm_disp_memory_operand" |
| + (and (match_code "mem") |
| + (match_test "GET_CODE(XEXP(op, 0)) == PLUS |
| + && REG_P(XEXP(XEXP(op, 0), 0)) |
| + && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)"))) |
| + |
| +;; True if this is a bswap operand |
| +(define_predicate "avr32_bswap_operand" |
| + (ior (match_operand 0 "avr32_k12_memory_operand") |
| + (match_operand 0 "register_operand"))) |
| + |
| +;; True if this is a valid coprocessor insn memory operand |
| +(define_predicate "avr32_cop_memory_operand" |
| + (and (match_operand 0 "memory_operand") |
| + (not (match_test "GET_CODE(XEXP(op, 0)) == PLUS |
| + && REG_P(XEXP(XEXP(op, 0), 0)) |
| + && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT) |
| +                    && !(CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(XEXP(op, 0), 1)), 'K', \"Ku10\"))")))) |
| + |
| +;; True if this is a valid source/destination operand |
| +;; for moving values to/from a coprocessor |
| +(define_predicate "avr32_cop_move_operand" |
| + (ior (match_operand 0 "register_operand") |
| + (match_operand 0 "avr32_cop_memory_operand"))) |
| + |
| + |
| +;; True if this is a valid extract byte offset for use in |
| +;; load extracted index insns |
| +(define_predicate "avr32_extract_shift_operand" |
| + (and (match_operand 0 "const_int_operand") |
| + (match_test "(INTVAL(op) == 0) || (INTVAL(op) == 8) |
| + || (INTVAL(op) == 16) || (INTVAL(op) == 24)"))) |
| + |
| +;; True if this is a floating-point register |
| +(define_predicate "avr32_fp_register_operand" |
| + (and (match_operand 0 "register_operand") |
| + (match_test "REGNO_REG_CLASS(REGNO(op)) == FP_REGS"))) |
| + |
| +;; True if this is a valid avr32 symbol operand |
| +(define_predicate "avr32_symbol_operand" |
| + (and (match_code "label_ref, symbol_ref, const") |
| + (match_test "avr32_find_symbol(op)"))) |
| + |
| +;; True if this is a valid operand for the lda.w and call pseudo insns |
| +(define_predicate "avr32_address_operand" |
| + (and (and (match_code "label_ref, symbol_ref") |
| + (match_test "avr32_find_symbol(op)")) |
| + (ior (match_test "TARGET_HAS_ASM_ADDR_PSEUDOS") |
| + (match_test "flag_pic")) )) |
| + |
| +;; An immediate k16 address operand |
| +(define_predicate "avr32_ks16_address_operand" |
| + (and (match_operand 0 "address_operand") |
| + (ior (match_test "REG_P(op)") |
| + (match_test "GET_CODE(op) == PLUS |
| + && ((GET_CODE(XEXP(op,0)) == CONST_INT) |
| + || (GET_CODE(XEXP(op,1)) == CONST_INT))")) )) |
| + |
| +;; An offset k16 memory operand |
| +(define_predicate "avr32_ks16_memory_operand" |
| + (and (match_code "mem") |
| + (match_test "avr32_ks16_address_operand (XEXP (op, 0), GET_MODE (XEXP (op, 0)))"))) |
| + |
| +;; An immediate k11 address operand |
| +(define_predicate "avr32_ks11_address_operand" |
| + (and (match_operand 0 "address_operand") |
| + (ior (match_test "REG_P(op)") |
| + (match_test "GET_CODE(op) == PLUS |
| + && (((GET_CODE(XEXP(op,0)) == CONST_INT) |
| + && avr32_const_ok_for_constraint_p(INTVAL(XEXP(op,0)), 'K', \"Ks11\")) |
| + || ((GET_CODE(XEXP(op,1)) == CONST_INT) |
| + && avr32_const_ok_for_constraint_p(INTVAL(XEXP(op,1)), 'K', \"Ks11\")))")) )) |
| + |
| +;; True if this is an avr32 call operand |
| +(define_predicate "avr32_call_operand" |
| + (ior (ior (match_operand 0 "register_operand") |
| + (ior (match_operand 0 "avr32_const_pool_ref_operand") |
| + (match_operand 0 "avr32_address_operand"))) |
| + (match_test "SYMBOL_REF_RCALL_FUNCTION_P(op)"))) |
| + |
| +;; Return true for operators performing ALU operations |
| + |
| +(define_predicate "alu_operator" |
| + (match_code "ior, xor, and, plus, minus, ashift, lshiftrt, ashiftrt")) |
| + |
| +(define_predicate "avr32_add_shift_immediate_operand" |
| + (and (match_operand 0 "immediate_operand") |
| + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ku02\")"))) |
| + |
| +(define_predicate "avr32_cond_register_immediate_operand" |
| + (ior (match_operand 0 "register_operand") |
| + (and (match_operand 0 "immediate_operand") |
| + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")")))) |
| + |
| +(define_predicate "avr32_cond_immediate_operand" |
| + (and (match_operand 0 "immediate_operand") |
| + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'I', \"Is08\")"))) |
| + |
| + |
| +(define_predicate "avr32_cond_move_operand" |
| + (ior (ior (match_operand 0 "register_operand") |
| + (and (match_operand 0 "immediate_operand") |
| + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")"))) |
| + (and (match_test "TARGET_V2_INSNS") |
| + (match_operand 0 "memory_operand")))) |
| + |
| +(define_predicate "avr32_mov_immediate_operand" |
| + (and (match_operand 0 "immediate_operand") |
| + (match_test "avr32_const_ok_for_move(INTVAL(op))"))) |
| + |
| + |
| +(define_predicate "avr32_rmw_address_operand" |
| + (ior (and (match_code "symbol_ref") |
| + (match_test "({rtx symbol = avr32_find_symbol(op); \ |
| + symbol && (GET_CODE (symbol) == SYMBOL_REF) && SYMBOL_REF_RMW_ADDR(symbol);})")) |
| + (and (match_operand 0 "immediate_operand") |
| + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks17\")"))) |
| + { |
| + return TARGET_RMW && !flag_pic; |
| + } |
| +) |
| + |
| +(define_predicate "avr32_rmw_memory_operand" |
| + (and (match_code "mem") |
| + (match_test "(GET_MODE(op) == SImode) && |
| + avr32_rmw_address_operand(XEXP(op, 0), GET_MODE(XEXP(op, 0)))"))) |
| + |
| +(define_predicate "avr32_rmw_memory_or_register_operand" |
| + (ior (match_operand 0 "avr32_rmw_memory_operand") |
| + (match_operand 0 "register_operand"))) |
| + |
| +(define_predicate "avr32_non_rmw_memory_operand" |
| + (and (not (match_operand 0 "avr32_rmw_memory_operand")) |
| + (match_operand 0 "memory_operand"))) |
| + |
| +(define_predicate "avr32_non_rmw_general_operand" |
| + (and (not (match_operand 0 "avr32_rmw_memory_operand")) |
| + (match_operand 0 "general_operand"))) |
| + |
| +(define_predicate "avr32_non_rmw_nonimmediate_operand" |
| + (and (not (match_operand 0 "avr32_rmw_memory_operand")) |
| + (match_operand 0 "nonimmediate_operand"))) |
| --- /dev/null |
| +++ b/gcc/config/avr32/simd.md |
| @@ -0,0 +1,145 @@ |
| +;; AVR32 machine description file for SIMD instructions. |
| +;; Copyright 2003-2006 Atmel Corporation. |
| +;; |
| +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com> |
| +;; |
| +;; This file is part of GCC. |
| +;; |
| +;; This program is free software; you can redistribute it and/or modify |
| +;; it under the terms of the GNU General Public License as published by |
| +;; the Free Software Foundation; either version 2 of the License, or |
| +;; (at your option) any later version. |
| +;; |
| +;; This program is distributed in the hope that it will be useful, |
| +;; but WITHOUT ANY WARRANTY; without even the implied warranty of |
| +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| +;; GNU General Public License for more details. |
| +;; |
| +;; You should have received a copy of the GNU General Public License |
| +;; along with this program; if not, write to the Free Software |
| +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
| + |
| +;; -*- Mode: Scheme -*- |
| + |
| + |
| +;; Vector modes |
| +(define_mode_macro VECM [V2HI V4QI]) |
| +(define_mode_attr size [(V2HI "h") (V4QI "b")]) |
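| + |
| +;; Each pattern below is instantiated once per mode in VECM, with <size> |
| +;; selecting the operand-size suffix; e.g. "add<mode>3" becomes "addv2hi3" |
| +;; emitting "padd.h" and "addv4qi3" emitting "padd.b". |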
| + |
| +(define_insn "add<mode>3" |
| + [(set (match_operand:VECM 0 "register_operand" "=r") |
| + (plus:VECM (match_operand:VECM 1 "register_operand" "r") |
| + (match_operand:VECM 2 "register_operand" "r")))] |
| + "TARGET_SIMD" |
| + "padd.<size>\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "alu")]) |
| + |
| + |
| +(define_insn "sub<mode>3" |
| + [(set (match_operand:VECM 0 "register_operand" "=r") |
| + (minus:VECM (match_operand:VECM 1 "register_operand" "r") |
| + (match_operand:VECM 2 "register_operand" "r")))] |
| + "TARGET_SIMD" |
| + "psub.<size>\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "alu")]) |
| + |
| + |
| +(define_insn "abs<mode>2" |
| + [(set (match_operand:VECM 0 "register_operand" "=r") |
| + (abs:VECM (match_operand:VECM 1 "register_operand" "r")))] |
| + "TARGET_SIMD" |
| + "pabs.s<size>\t%0, %1" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "alu")]) |
| + |
| +(define_insn "ashl<mode>3" |
| + [(set (match_operand:VECM 0 "register_operand" "=r") |
| + (ashift:VECM (match_operand:VECM 1 "register_operand" "r") |
| + (match_operand:SI 2 "immediate_operand" "Ku04")))] |
| + "TARGET_SIMD" |
| + "plsl.<size>\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "alu")]) |
| + |
| +(define_insn "ashr<mode>3" |
| + [(set (match_operand:VECM 0 "register_operand" "=r") |
| + (ashiftrt:VECM (match_operand:VECM 1 "register_operand" "r") |
| + (match_operand:SI 2 "immediate_operand" "Ku04")))] |
| + "TARGET_SIMD" |
| + "pasr.<size>\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "alu")]) |
| + |
| +(define_insn "lshr<mode>3" |
| + [(set (match_operand:VECM 0 "register_operand" "=r") |
| + (lshiftrt:VECM (match_operand:VECM 1 "register_operand" "r") |
| + (match_operand:SI 2 "immediate_operand" "Ku04")))] |
| + "TARGET_SIMD" |
| + "plsr.<size>\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "alu")]) |
| + |
| +(define_insn "smaxv2hi3" |
| + [(set (match_operand:V2HI 0 "register_operand" "=r") |
| + (smax:V2HI (match_operand:V2HI 1 "register_operand" "r") |
| + (match_operand:V2HI 2 "register_operand" "r")))] |
| + |
| + "TARGET_SIMD" |
| + "pmax.sh\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "alu")]) |
| + |
| +(define_insn "sminv2hi3" |
| + [(set (match_operand:V2HI 0 "register_operand" "=r") |
| + (smin:V2HI (match_operand:V2HI 1 "register_operand" "r") |
| + (match_operand:V2HI 2 "register_operand" "r")))] |
| + |
| + "TARGET_SIMD" |
| + "pmin.sh\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "alu")]) |
| + |
| +(define_insn "umaxv4qi3" |
| + [(set (match_operand:V4QI 0 "register_operand" "=r") |
| + (umax:V4QI (match_operand:V4QI 1 "register_operand" "r") |
| + (match_operand:V4QI 2 "register_operand" "r")))] |
| + |
| + "TARGET_SIMD" |
| + "pmax.ub\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "alu")]) |
| + |
| +(define_insn "uminv4qi3" |
| + [(set (match_operand:V4QI 0 "register_operand" "=r") |
| + (umin:V4QI (match_operand:V4QI 1 "register_operand" "r") |
| + (match_operand:V4QI 2 "register_operand" "r")))] |
| + |
| + "TARGET_SIMD" |
| + "pmin.ub\t%0, %1, %2" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "alu")]) |
| + |
| + |
| +(define_insn "addsubv2hi" |
| + [(set (match_operand:V2HI 0 "register_operand" "=r") |
| + (vec_concat:V2HI |
| + (plus:HI (match_operand:HI 1 "register_operand" "r") |
| + (match_operand:HI 2 "register_operand" "r")) |
| + (minus:HI (match_dup 1) (match_dup 2))))] |
| + "TARGET_SIMD" |
| + "paddsub.h\t%0, %1:b, %2:b" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "alu")]) |
| + |
| +(define_insn "subaddv2hi" |
| + [(set (match_operand:V2HI 0 "register_operand" "=r") |
| + (vec_concat:V2HI |
| + (minus:HI (match_operand:HI 1 "register_operand" "r") |
| + (match_operand:HI 2 "register_operand" "r")) |
| + (plus:HI (match_dup 1) (match_dup 2))))] |
| + "TARGET_SIMD" |
| + "psubadd.h\t%0, %1:b, %2:b" |
| + [(set_attr "length" "4") |
| + (set_attr "type" "alu")]) |
| --- /dev/null |
| +++ b/gcc/config/avr32/sync.md |
| @@ -0,0 +1,244 @@ |
| +;;================================================================= |
| +;; Atomic operations |
| +;;================================================================= |
| + |
| + |
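| +;; The patterns below build load-locked/store-conditional style sequences: |
| +;; "ssrf 5" sets the lock (L) flag in the status register, "stcond" performs |
| +;; the store only while that flag is still set, and the following "brne" |
| +;; retries the sequence when the store did not take place. |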
| +(define_insn "sync_compare_and_swapsi" |
| + [(set (match_operand:SI 0 "register_operand" "=&r,&r") |
| + (match_operand:SI 1 "memory_operand" "+RKs16,+RKs16")) |
| + (set (match_dup 1) |
| + (unspec_volatile:SI |
| + [(match_dup 1) |
| + (match_operand:SI 2 "register_immediate_operand" "r,Ks21") |
| + (match_operand:SI 3 "register_operand" "r,r")] |
| + VUNSPEC_SYNC_CMPXCHG)) ] |
| + "" |
| + "0: |
| + ssrf\t5 |
| + ld.w\t%0,%1 |
| + cp.w\t%0,%2 |
| + brne\t0f |
| + stcond\t%1, %3 |
| + brne\t0b |
| + 0: |
| + " |
| + [(set_attr "length" "16,18") |
| + (set_attr "cc" "clobber")] |
| + ) |
| + |
| + |
| +(define_code_macro atomic_op [plus minus and ior xor]) |
| +(define_code_attr atomic_asm_insn [(plus "add") (minus "sub") (and "and") (ior "or") (xor "eor")]) |
| +(define_code_attr atomic_insn [(plus "add") (minus "sub") (and "and") (ior "ior") (xor "xor")]) |
| + |
| +(define_insn "sync_loadsi" |
| +  ; NB! Put an early clobber on the destination operand to |
| +  ; keep gcc from using the same register for the source and |
| +  ; the destination. This prevents gcc from clobbering the |
| +  ; source operand, since these instructions are actually |
| +  ; inside a "loop". |
| + [(set (match_operand:SI 0 "register_operand" "=&r") |
| + (unspec_volatile:SI |
| + [(match_operand:SI 1 "avr32_ks16_memory_operand" "RKs16") |
| + (label_ref (match_operand 2 "" ""))] |
| + VUNSPEC_SYNC_SET_LOCK_AND_LOAD) )] |
| + "" |
| + "%2: |
| + ssrf\t5 |
| + ld.w\t%0,%1" |
| + [(set_attr "length" "6") |
| + (set_attr "cc" "clobber")] |
| + ) |
| + |
| +(define_insn "sync_store_if_lock" |
| + [(set (match_operand:SI 0 "avr32_ks16_memory_operand" "=RKs16") |
| + (unspec_volatile:SI |
| + [(match_operand:SI 1 "register_operand" "r") |
| + (label_ref (match_operand 2 "" ""))] |
| + VUNSPEC_SYNC_STORE_IF_LOCK) )] |
| + "" |
| + "stcond\t%0, %1 |
| + brne\t%2" |
| + [(set_attr "length" "6") |
| + (set_attr "cc" "clobber")] |
| + ) |
| + |
| + |
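| +;; The expanders below split an atomic operation into three insns: a locked |
| +;; load (sync_loadsi), the operation itself, and a conditional store |
| +;; (sync_store_if_lock) that branches back to the shared label and retries |
| +;; when the lock was lost in between. |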
| +(define_expand "sync_<atomic_insn>si" |
| + [(set (match_dup 2) |
| + (unspec_volatile:SI |
| + [(match_operand:SI 0 "avr32_ks16_memory_operand" "") |
| + (match_dup 3)] |
| + VUNSPEC_SYNC_SET_LOCK_AND_LOAD)) |
| + (set (match_dup 2) |
| + (atomic_op:SI (match_dup 2) |
| + (match_operand:SI 1 "register_immediate_operand" ""))) |
| + (set (match_dup 0) |
| + (unspec_volatile:SI |
| + [(match_dup 2) |
| + (match_dup 3)] |
| + VUNSPEC_SYNC_STORE_IF_LOCK) ) |
| + (use (match_dup 1)) |
| + (use (match_dup 4))] |
| + "" |
| + { |
| + rtx *mem_expr = &operands[0]; |
| + rtx ptr_reg; |
| + if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) ) |
| + { |
| + ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0)); |
| + XEXP (*mem_expr, 0) = ptr_reg; |
| + } |
| + else |
| + { |
| + rtx address = XEXP (*mem_expr, 0); |
| + if ( REG_P (address) ) |
| + ptr_reg = address; |
| + else if ( REG_P (XEXP (address, 0)) ) |
| + ptr_reg = XEXP (address, 0); |
| + else |
| + ptr_reg = XEXP (address, 1); |
| + } |
| + |
| + operands[2] = gen_reg_rtx (SImode); |
| + operands[3] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ()); |
| + operands[4] = ptr_reg; |
| + |
| + } |
| + ) |
| + |
| + |
| + |
| +(define_expand "sync_old_<atomic_insn>si" |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (unspec_volatile:SI |
| + [(match_operand:SI 1 "avr32_ks16_memory_operand" "") |
| + (match_dup 4)] |
| + VUNSPEC_SYNC_SET_LOCK_AND_LOAD)) |
| + (set (match_dup 3) |
| + (atomic_op:SI (match_dup 0) |
| + (match_operand:SI 2 "register_immediate_operand" ""))) |
| + (set (match_dup 1) |
| + (unspec_volatile:SI |
| + [(match_dup 3) |
| + (match_dup 4)] |
| + VUNSPEC_SYNC_STORE_IF_LOCK) ) |
| + (use (match_dup 2)) |
| + (use (match_dup 5))] |
| + "" |
| + { |
| + rtx *mem_expr = &operands[1]; |
| + rtx ptr_reg; |
| + if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) ) |
| + { |
| + ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0)); |
| + XEXP (*mem_expr, 0) = ptr_reg; |
| + } |
| + else |
| + { |
| + rtx address = XEXP (*mem_expr, 0); |
| + if ( REG_P (address) ) |
| + ptr_reg = address; |
| + else if ( REG_P (XEXP (address, 0)) ) |
| + ptr_reg = XEXP (address, 0); |
| + else |
| + ptr_reg = XEXP (address, 1); |
| + } |
| + |
| + operands[3] = gen_reg_rtx (SImode); |
| + operands[4] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ()); |
| + operands[5] = ptr_reg; |
| + } |
| + ) |
| + |
| +(define_expand "sync_new_<atomic_insn>si" |
| + [(set (match_operand:SI 0 "register_operand" "") |
| + (unspec_volatile:SI |
| + [(match_operand:SI 1 "avr32_ks16_memory_operand" "") |
| + (match_dup 3)] |
| + VUNSPEC_SYNC_SET_LOCK_AND_LOAD)) |
| + (set (match_dup 0) |
| + (atomic_op:SI (match_dup 0) |
| + (match_operand:SI 2 "register_immediate_operand" ""))) |
| + (set (match_dup 1) |
| + (unspec_volatile:SI |
| + [(match_dup 0) |
| + (match_dup 3)] |
| + VUNSPEC_SYNC_STORE_IF_LOCK) ) |
| + (use (match_dup 2)) |
| + (use (match_dup 4))] |
| + "" |
| + { |
| + rtx *mem_expr = &operands[1]; |
| + rtx ptr_reg; |
| + if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) ) |
| + { |
| + ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0)); |
| + XEXP (*mem_expr, 0) = ptr_reg; |
| + } |
| + else |
| + { |
| + rtx address = XEXP (*mem_expr, 0); |
| + if ( REG_P (address) ) |
| + ptr_reg = address; |
| + else if ( REG_P (XEXP (address, 0)) ) |
| + ptr_reg = XEXP (address, 0); |
| + else |
| + ptr_reg = XEXP (address, 1); |
| + } |
| + |
| + operands[3] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ()); |
| + operands[4] = ptr_reg; |
| + } |
| + ) |
| + |
| + |
| +;(define_insn "sync_<atomic_insn>si" |
| +; [(set (match_operand:SI 0 "memory_operand" "+RKs16") |
| +; (unspec_volatile:SI |
| +; [(atomic_op:SI (match_dup 0) |
| +; (match_operand:SI 1 "register_operand" "r"))] |
| +; VUNSPEC_SYNC_CMPXCHG)) |
| +; (clobber (match_scratch:SI 2 "=&r"))] |
| +; "" |
| +; "0: |
| +; ssrf\t5 |
| +; ld.w\t%2,%0 |
| +; <atomic_asm_insn>\t%2,%1 |
| +; stcond\t%0, %2 |
| +; brne\t0b |
| +; " |
| +; [(set_attr "length" "14") |
| +; (set_attr "cc" "clobber")] |
| +; ) |
| +; |
| +;(define_insn "sync_new_<atomic_insn>si" |
| +; [(set (match_operand:SI 1 "memory_operand" "+RKs16") |
| +; (unspec_volatile:SI |
| +; [(atomic_op:SI (match_dup 1) |
| +; (match_operand:SI 2 "register_operand" "r"))] |
| +; VUNSPEC_SYNC_CMPXCHG)) |
| +; (set (match_operand:SI 0 "register_operand" "=&r") |
| +; (atomic_op:SI (match_dup 1) |
| +; (match_dup 2)))] |
| +; "" |
| +; "0: |
| +; ssrf\t5 |
| +; ld.w\t%0,%1 |
| +; <atomic_asm_insn>\t%0,%2 |
| +; stcond\t%1, %0 |
| +; brne\t0b |
| +; " |
| +; [(set_attr "length" "14") |
| +; (set_attr "cc" "clobber")] |
| +; ) |
| + |
| +(define_insn "sync_lock_test_and_setsi" |
| + [ (set (match_operand:SI 0 "register_operand" "=&r") |
| + (match_operand:SI 1 "memory_operand" "+RKu00")) |
| + (set (match_dup 1) |
| + (match_operand:SI 2 "register_operand" "r")) ] |
| + "" |
| + "xchg\t%0, %p1, %2" |
| + [(set_attr "length" "4")] |
| + ) |
| --- /dev/null |
| +++ b/gcc/config/avr32/t-avr32 |
| @@ -0,0 +1,77 @@ |
| + |
| +MD_INCLUDES= $(srcdir)/config/avr32/avr32.md \ |
| + $(srcdir)/config/avr32/sync.md \ |
| + $(srcdir)/config/avr32/fpcp.md \ |
| + $(srcdir)/config/avr32/simd.md \ |
| + $(srcdir)/config/avr32/predicates.md |
| + |
| +s-config s-conditions s-flags s-codes s-constants s-emit s-recog s-preds \ |
| + s-opinit s-extract s-peep s-attr s-attrtab s-output: $(MD_INCLUDES) |
| + |
| +# We want fine-grained libraries, so use the new code |
| +# to build the floating-point emulation libraries. |
| +FPBIT = fp-bit.c |
| +DPBIT = dp-bit.c |
| + |
| +LIB1ASMSRC = avr32/lib1funcs.S |
| +LIB1ASMFUNCS = _avr32_f64_mul _avr32_f64_mul_fast _avr32_f64_addsub _avr32_f64_addsub_fast _avr32_f64_to_u32 \ |
| + _avr32_f64_to_s32 _avr32_f64_to_u64 _avr32_f64_to_s64 _avr32_u32_to_f64 \ |
| + _avr32_s32_to_f64 _avr32_f64_cmp_eq _avr32_f64_cmp_ge _avr32_f64_cmp_lt \ |
| + _avr32_f32_cmp_eq _avr32_f32_cmp_ge _avr32_f32_cmp_lt _avr32_f64_div _avr32_f64_div_fast \ |
| + _avr32_f32_div _avr32_f32_div_fast _avr32_f32_addsub _avr32_f32_addsub_fast \ |
| + _avr32_f32_mul _avr32_s32_to_f32 _avr32_u32_to_f32 _avr32_f32_to_s32 \ |
| + _avr32_f32_to_u32 _avr32_f32_to_f64 _avr32_f64_to_f32 _mulsi3 |
| + |
| +#LIB2FUNCS_EXTRA += $(srcdir)/config/avr32/lib2funcs.S |
| + |
| +MULTILIB_OPTIONS = march=ap/march=ucr1/march=ucr2/march=ucr2nomul |
| +MULTILIB_DIRNAMES = ap ucr1 ucr2 ucr2nomul |
| +MULTILIB_EXCEPTIONS = |
| +MULTILIB_MATCHES += march?ap=mpart?ap7000 |
| +MULTILIB_MATCHES += march?ap=mpart?ap7001 |
| +MULTILIB_MATCHES += march?ap=mpart?ap7002 |
| +MULTILIB_MATCHES += march?ap=mpart?ap7200 |
| +MULTILIB_MATCHES += march?ucr1=march?uc |
| +MULTILIB_MATCHES += march?ucr1=mpart?uc3a0512es |
| +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0128 |
| +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0256 |
| +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0512 |
| +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1128 |
| +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1256 |
| +MULTILIB_MATCHES += march?ucr1=mpart?uc3a1512es |
| +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1512 |
| +MULTILIB_MATCHES += march?ucr2nomul=mpart?uc3a3revd |
| +MULTILIB_MATCHES += march?ucr2=mpart?uc3a364 |
| +MULTILIB_MATCHES += march?ucr2=mpart?uc3a364s |
| +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128 |
| +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128s |
| +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256 |
| +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256s |
| +MULTILIB_MATCHES += march?ucr1=mpart?uc3b064 |
| +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0128 |
| +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256es |
| +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256 |
| +MULTILIB_MATCHES += march?ucr1=mpart?uc3b164 |
| +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1128 |
| +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256es |
| +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256 |
| + |
| + |
| +EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o crti.o crtn.o |
| + |
| +CRTSTUFF_T_CFLAGS = -mrelax |
| +CRTSTUFF_T_CFLAGS_S = -mrelax -fPIC |
| +TARGET_LIBGCC2_CFLAGS += -mrelax |
| + |
| +LIBGCC = stmp-multilib |
| +INSTALL_LIBGCC = install-multilib |
| + |
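| +# fp-bit.c is built with FLOAT defined and therefore provides the |
| +# single-precision routines; dp-bit.c is a plain copy of the same source |
| +# and provides the double-precision routines. |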
| +fp-bit.c: $(srcdir)/config/fp-bit.c |
| + echo '#define FLOAT' > fp-bit.c |
| + cat $(srcdir)/config/fp-bit.c >> fp-bit.c |
| + |
| +dp-bit.c: $(srcdir)/config/fp-bit.c |
| + cat $(srcdir)/config/fp-bit.c > dp-bit.c |
| + |
| + |
| + |
| --- /dev/null |
| +++ b/gcc/config/avr32/t-elf |
| @@ -0,0 +1,16 @@ |
| + |
| +# Assemble startup files. |
| +$(T)crti.o: $(srcdir)/config/avr32/crti.asm $(GCC_PASSES) |
| + $(GCC_FOR_TARGET) $(CRTSTUFF_CFLAGS) $(CRTSTUFF_T_CFLAGS) $(INCLUDES) \ |
| + -c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/avr32/crti.asm |
| + |
| +$(T)crtn.o: $(srcdir)/config/avr32/crtn.asm $(GCC_PASSES) |
| + $(GCC_FOR_TARGET) $(CRTSTUFF_CFLAGS) $(CRTSTUFF_T_CFLAGS) $(INCLUDES) \ |
| + -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/avr32/crtn.asm |
| + |
| + |
| +# Build the libraries for both hard and soft floating point |
| +EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o crti.o crtn.o |
| + |
| +LIBGCC = stmp-multilib |
| +INSTALL_LIBGCC = install-multilib |
| --- /dev/null |
| +++ b/gcc/config/avr32/uclinux-elf.h |
| @@ -0,0 +1,20 @@ |
| + |
| +/* Run-time Target Specification. */ |
| +#undef TARGET_VERSION |
| +#define TARGET_VERSION fputs (" (AVR32 uClinux with ELF)", stderr) |
| + |
| +/* We don't want a .jcr section on uClinux. As if this makes a difference... */ |
| +#define TARGET_USE_JCR_SECTION 0 |
| + |
| +/* Here we go. Drop the crtbegin/crtend stuff completely. */ |
| +#undef STARTFILE_SPEC |
| +#define STARTFILE_SPEC \ |
| + "%{!shared: %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s}" \ |
| + " %{!p:%{profile:gcrt1.o%s}" \ |
| + " %{!profile:crt1.o%s}}}} crti.o%s" |
| + |
| +#undef ENDFILE_SPEC |
| +#define ENDFILE_SPEC "crtn.o%s" |
| + |
| +#undef TARGET_DEFAULT |
| +#define TARGET_DEFAULT (AVR32_FLAG_NO_INIT_GOT) |
| --- a/gcc/config/host-linux.c |
| +++ b/gcc/config/host-linux.c |
| @@ -25,6 +25,9 @@ |
| #include "hosthooks.h" |
| #include "hosthooks-def.h" |
| |
| +#ifndef SSIZE_MAX |
| +#define SSIZE_MAX LONG_MAX |
| +#endif |
| |
| /* Linux has a feature called exec-shield-randomize that perturbs the |
| address of non-fixed mapped segments by a (relatively) small amount. |
| --- a/gcc/config.gcc |
| +++ b/gcc/config.gcc |
| @@ -781,6 +781,24 @@ avr-*-*) |
| tm_file="avr/avr.h dbxelf.h" |
| use_fixproto=yes |
| ;; |
| +avr32*-*-linux*) |
| + tm_file="dbxelf.h elfos.h linux.h avr32/linux-elf.h avr32/avr32.h " |
| + tmake_file="t-linux avr32/t-avr32 avr32/t-elf" |
| + extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o" |
| + extra_modes=avr32/avr32-modes.def |
| + gnu_ld=yes |
| + ;; |
| +avr32*-*-uclinux*) |
| + tm_file="dbxelf.h elfos.h linux.h avr32/linux-elf.h avr32/uclinux-elf.h avr32/avr32.h" |
| + tmake_file="t-linux avr32/t-avr32 avr32/t-elf" |
| + extra_modes=avr32/avr32-modes.def |
| + gnu_ld=yes |
| + ;; |
| +avr32-*-*) |
| + tm_file="dbxelf.h elfos.h avr32/avr32.h avr32/avr32-elf.h" |
| + tmake_file="avr32/t-avr32 avr32/t-elf" |
| + extra_modes=avr32/avr32-modes.def |
| + ;; |
| bfin*-elf*) |
| tm_file="${tm_file} dbxelf.h elfos.h bfin/elf.h" |
| tmake_file=bfin/t-bfin-elf |
| @@ -1681,6 +1699,9 @@ pdp11-*-bsd) |
| pdp11-*-*) |
| use_fixproto=yes |
| ;; |
| +avr-*-*) |
| + use_fixproto=yes |
| + ;; |
| # port not yet contributed |
| #powerpc-*-openbsd*) |
| # tmake_file="${tmake_file} rs6000/t-fprules rs6000/t-fprules-fpbit " |
| @@ -2717,6 +2738,32 @@ case "${target}" in |
| fi |
| ;; |
| |
| + avr32*-*-*) |
| + supported_defaults="part arch" |
| + |
| + case "$with_part" in |
| + "" \ |
| + | "ap7000" | "ap7010" | "ap7020" | "uc3a0256" | "uc3a0512" | "uc3a1128" | "uc3a1256" | "uc3a1512" ) |
| + # OK |
| + ;; |
| + *) |
| + echo "Unknown part used in --with-part=$with_part" 1>&2 |
| + exit 1 |
| + ;; |
| + esac |
| + |
| + case "$with_arch" in |
| + "" \ |
| + | "ap" | "uc") |
| + # OK |
| + ;; |
| + *) |
| + echo "Unknown arch used in --with-arch=$with_arch" 1>&2 |
| + exit 1 |
| + ;; |
| + esac |
| + ;; |
| + |
| fr*-*-*linux*) |
| supported_defaults=cpu |
| case "$with_cpu" in |
| --- a/gcc/doc/extend.texi |
| +++ b/gcc/doc/extend.texi |
| @@ -1981,7 +1981,7 @@ this attribute to work correctly. |
| |
| @item interrupt |
| @cindex interrupt handler functions |
| -Use this attribute on the ARM, AVR, C4x, CRX, M32C, M32R/D, MS1, and Xstormy16 |
| +Use this attribute on the ARM, AVR, AVR32, C4x, CRX, M32C, M32R/D, MS1, and Xstormy16 |
| ports to indicate that the specified function is an interrupt handler. |
| The compiler will generate function entry and exit sequences suitable |
| for use in an interrupt handler when this attribute is present. |
| @@ -2000,6 +2000,15 @@ void f () __attribute__ ((interrupt ("IR |
| |
| Permissible values for this parameter are: IRQ, FIQ, SWI, ABORT and UNDEF@. |
| |
| +Note that for the AVR32 you can specify which banking scheme is used for |
| +the interrupt mode this interrupt handler is used in, like this: |
| + |
| +@smallexample |
| +void f () __attribute__ ((interrupt ("FULL"))); |
| +@end smallexample |
| + |
| +Permissible values for this parameter are: FULL, HALF, NONE and UNDEF. |
| + |
| @item interrupt_handler |
| @cindex interrupt handler functions on the Blackfin, m68k, H8/300 and SH processors |
| Use this attribute on the Blackfin, m68k, H8/300, H8/300H, H8S, and SH to |
| @@ -3460,6 +3469,23 @@ placed in either the @code{.bss_below100 |
| |
| @end table |
| |
| +@subsection AVR32 Variable Attributes |
| + |
| +One attribute is currently defined for AVR32 configurations: |
| +@code{rmw_addressable} |
| + |
| +@table @code |
| +@item rmw_addressable |
| +@cindex @code{rmw_addressable} attribute |
| + |
| +This attribute can be used to signal that a variable can be accessed |
| +with the addressing mode of the AVR32 Atomic Read-Modify-Write memory |
| +instructions and hence make it possible for gcc to generate these |
| +instructions without using built-in functions or inline assembly statements. |
| +Variables used within the AVR32 Atomic Read-Modify-Write built-in |
| +functions will automatically get the @code{rmw_addressable} attribute. |
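| + |
| +A minimal illustration (the variable name is arbitrary): |
| + |
| +@smallexample |
| +int shared_flags __attribute__ ((rmw_addressable)); |
| +@end smallexample |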
| +@end table |
| + |
| @node Type Attributes |
| @section Specifying Attributes of Types |
| @cindex attribute of types |
| @@ -6167,6 +6193,7 @@ instructions, but allow the compiler to |
| @menu |
| * Alpha Built-in Functions:: |
| * ARM Built-in Functions:: |
| +* AVR32 Built-in Functions:: |
| * Blackfin Built-in Functions:: |
| * FR-V Built-in Functions:: |
| * X86 Built-in Functions:: |
| @@ -6405,6 +6432,76 @@ long long __builtin_arm_wxor (long long, |
| long long __builtin_arm_wzero () |
| @end smallexample |
| |
| +@node AVR32 Built-in Functions |
| +@subsection AVR32 Built-in Functions |
| + |
| + |
| + |
| +Built-in functions for atomic memory (RMW) instructions. Note that these |
| +built-ins will fail for targets where the RMW instructions are not |
| +implemented. Also note that these instructions only accept a Ks15 << 2 |
| +memory address and will therefore not work with any runtime-computed |
| +memory addresses. The user is responsible for making sure that any |
| +pointers used within these functions point to a valid memory address. |
| + |
| +@smallexample |
| +void __builtin_mems(int */*ptr*/, int /*bit*/) |
| +void __builtin_memc(int */*ptr*/, int /*bit*/) |
| +void __builtin_memt(int */*ptr*/, int /*bit*/) |
| +@end smallexample |
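| + |
| +As an illustration (the function and variable names are arbitrary, and the |
| +variable must end up at an address the RMW instructions can reach), the |
| +bit-set built-in could be used on a statically allocated word like this: |
| + |
| +@smallexample |
| +static int status; |
| + |
| +void |
| +mark_ready (void) |
| +@{ |
| +  __builtin_mems (&status, 2); /* atomically set bit 2 of status */ |
| +@} |
| +@end smallexample |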
| + |
| +Built-in functions for DSP instructions. Note that these built-ins will |
| +fail for targets where the DSP instructions are not implemented. |
| + |
| +@smallexample |
| +int __builtin_sats (int /*Rd*/,int /*sa*/, int /*bn*/) |
| +int __builtin_satu (int /*Rd*/,int /*sa*/, int /*bn*/) |
| +int __builtin_satrnds (int /*Rd*/,int /*sa*/, int /*bn*/) |
| +int __builtin_satrndu (int /*Rd*/,int /*sa*/, int /*bn*/) |
| +short __builtin_mulsathh_h (short, short) |
| +int __builtin_mulsathh_w (short, short) |
| +short __builtin_mulsatrndhh_h (short, short) |
| +int __builtin_mulsatrndwh_w (int, short) |
| +int __builtin_mulsatwh_w (int, short) |
| +int __builtin_macsathh_w (int, short, short) |
| +short __builtin_satadd_h (short, short) |
| +short __builtin_satsub_h (short, short) |
| +int __builtin_satadd_w (int, int) |
| +int __builtin_satsub_w (int, int) |
| +long long __builtin_mulwh_d(int, short) |
| +long long __builtin_mulnwh_d(int, short) |
| +long long __builtin_macwh_d(long long, int, short) |
| +long long __builtin_machh_d(long long, short, short) |
| +@end smallexample |
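| + |
| +For example (illustrative only), a saturating 32-bit accumulation could be |
| +written as: |
| + |
| +@smallexample |
| +int |
| +accumulate (int acc, int sample) |
| +@{ |
| +  return __builtin_satadd_w (acc, sample); /* saturating 32-bit add */ |
| +@} |
| +@end smallexample |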
| + |
| +Other built-in functions for instructions that cannot easily be |
| +generated by the compiler. |
| + |
| +@smallexample |
| +void __builtin_ssrf(int); |
| +void __builtin_csrf(int); |
| +void __builtin_musfr(int); |
| +int __builtin_mustr(void); |
| +int __builtin_mfsr(int /*Status Register Address*/) |
| +void __builtin_mtsr(int /*Status Register Address*/, int /*Value*/) |
| +int __builtin_mfdr(int /*Debug Register Address*/) |
| +void __builtin_mtdr(int /*Debug Register Address*/, int /*Value*/) |
| +void __builtin_cache(void * /*Address*/, int /*Cache Operation*/) |
| +void __builtin_sync(int /*Sync Operation*/) |
| +void __builtin_tlbr(void) |
| +void __builtin_tlbs(void) |
| +void __builtin_tlbw(void) |
| +void __builtin_breakpoint(void) |
| +int __builtin_xchg(void * /*Address*/, int /*Value*/ ) |
| +short __builtin_bswap_16(short) |
| +int __builtin_bswap_32(int) |
| +void __builtin_cop(int/*cpnr*/, int/*crd*/, int/*crx*/, int/*cry*/, int/*op*/) |
| +int __builtin_mvcr_w(int/*cpnr*/, int/*crs*/) |
| +void __builtin_mvrc_w(int/*cpnr*/, int/*crd*/, int/*value*/) |
| +long long __builtin_mvcr_d(int/*cpnr*/, int/*crs*/) |
| +void __builtin_mvrc_d(int/*cpnr*/, int/*crd*/, long long/*value*/) |
| +@end smallexample |
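| + |
| +For instance (the register address used here is only a placeholder), reading |
| +and updating a system register might look like: |
| + |
| +@smallexample |
| +#define SYSREG_ADDR 0x0 /* placeholder system register address */ |
| + |
| +int |
| +read_modify_sysreg (void) |
| +@{ |
| +  int old = __builtin_mfsr (SYSREG_ADDR); |
| +  __builtin_mtsr (SYSREG_ADDR, old | 1); |
| +  return old; |
| +@} |
| +@end smallexample |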
| + |
| @node Blackfin Built-in Functions |
| @subsection Blackfin Built-in Functions |
| |
| --- a/gcc/doc/invoke.texi |
| +++ b/gcc/doc/invoke.texi |
| @@ -190,7 +190,7 @@ in the following sections. |
| -fno-default-inline -fvisibility-inlines-hidden @gol |
| -Wabi -Wctor-dtor-privacy @gol |
| -Wnon-virtual-dtor -Wreorder @gol |
| --Weffc++ -Wno-deprecated -Wstrict-null-sentinel @gol |
| +-Weffc++ -Wno-deprecated @gol |
| -Wno-non-template-friend -Wold-style-cast @gol |
| -Woverloaded-virtual -Wno-pmf-conversions @gol |
| -Wsign-promo} |
| @@ -588,6 +588,12 @@ Objective-C and Objective-C++ Dialects}. |
| -mauto-incdec -minmax -mlong-calls -mshort @gol |
| -msoft-reg-count=@var{count}} |
| |
| +@emph{AVR32 Options} |
| +@gccoptlist{-muse-rodata-section -mhard-float -msoft-float -mrelax @gol |
| +-mforce-double-align -mno-init-got -mmd-reorg-opt -masm-addr-pseudos @gol |
| +-mpart=@var{part} -mcpu=@var{cpu} -march=@var{arch} @gol |
| +-mfast-float -mimm-in-const-pool} |
| + |
| @emph{MCore Options} |
| @gccoptlist{-mhardlit -mno-hardlit -mdiv -mno-div -mrelax-immediates @gol |
| -mno-relax-immediates -mwide-bitfields -mno-wide-bitfields @gol |
| @@ -1868,14 +1874,6 @@ to filter out those warnings. |
| @opindex Wno-deprecated |
| Do not warn about usage of deprecated features. @xref{Deprecated Features}. |
| |
| -@item -Wstrict-null-sentinel @r{(C++ only)} |
| -@opindex Wstrict-null-sentinel |
| -Warn also about the use of an uncasted @code{NULL} as sentinel. When |
| -compiling only with GCC this is a valid sentinel, as @code{NULL} is defined |
| -to @code{__null}. Although it is a null pointer constant not a null pointer, |
| -it is guaranteed to of the same size as a pointer. But this use is |
| -not portable across different compilers. |
| - |
| @item -Wno-non-template-friend @r{(C++ only)} |
| @opindex Wno-non-template-friend |
| Disable warnings when non-templatized friend functions are declared |
| @@ -2732,13 +2730,11 @@ requiring @option{-O}. |
| If you want to warn about code which uses the uninitialized value of the |
| variable in its own initializer, use the @option{-Winit-self} option. |
| |
| -These warnings occur for individual uninitialized or clobbered |
| -elements of structure, union or array variables as well as for |
| -variables which are uninitialized or clobbered as a whole. They do |
| -not occur for variables or elements declared @code{volatile}. Because |
| -these warnings depend on optimization, the exact variables or elements |
| -for which there are warnings will depend on the precise optimization |
| -options and version of GCC used. |
| +These warnings occur only for variables that are candidates for |
| +register allocation. Therefore, they do not occur for a variable that |
| +is declared @code{volatile}, or whose address is taken, or whose size |
| +is other than 1, 2, 4 or 8 bytes. Also, they do not occur for |
| +structures, unions or arrays, even when they are in registers. |
| |
| Note that there may be no warning about a variable that is used only |
| to compute a value that itself is never used, because such |
| @@ -6201,10 +6197,6 @@ If number of candidates in the set is sm |
| we always try to remove unnecessary ivs from the set during its |
| optimization when a new iv is added to the set. |
| |
| -@item scev-max-expr-size |
| -Bound on size of expressions used in the scalar evolutions analyzer. |
| -Large expressions slow the analyzer. |
| - |
| @item vect-max-version-checks |
| The maximum number of runtime checks that can be performed when doing |
| loop versioning in the vectorizer. See option ftree-vect-loop-version |
| @@ -7402,7 +7394,7 @@ platform. |
| * ARC Options:: |
| * ARM Options:: |
| * AVR Options:: |
| -* Blackfin Options:: |
| +* AVR32 Options:: |
| * CRIS Options:: |
| * CRX Options:: |
| * Darwin Options:: |
| @@ -7867,81 +7859,80 @@ comply to the C standards, but it will p |
| size. |
| @end table |
| |
| -@node Blackfin Options |
| -@subsection Blackfin Options |
| -@cindex Blackfin Options |
| +@node AVR32 Options |
| +@subsection AVR32 Options |
| +@cindex AVR32 Options |
| + |
| +These options are defined for AVR32 implementations: |
| |
| @table @gcctabopt |
| -@item -momit-leaf-frame-pointer |
| -@opindex momit-leaf-frame-pointer |
| -Don't keep the frame pointer in a register for leaf functions. This |
| -avoids the instructions to save, set up and restore frame pointers and |
| -makes an extra register available in leaf functions. The option |
| -@option{-fomit-frame-pointer} removes the frame pointer for all functions |
| -which might make debugging harder. |
| +@item -muse-rodata-section |
| +@opindex muse-rodata-section |
| +Use section @samp{.rodata} for read-only data instead of @samp{.text}. |
| |
| -@item -mspecld-anomaly |
| -@opindex mspecld-anomaly |
| -When enabled, the compiler will ensure that the generated code does not |
| -contain speculative loads after jump instructions. This option is enabled |
| -by default. |
| - |
| -@item -mno-specld-anomaly |
| -@opindex mno-specld-anomaly |
| -Don't generate extra code to prevent speculative loads from occurring. |
| - |
| -@item -mcsync-anomaly |
| -@opindex mcsync-anomaly |
| -When enabled, the compiler will ensure that the generated code does not |
| -contain CSYNC or SSYNC instructions too soon after conditional branches. |
| -This option is enabled by default. |
| - |
| -@item -mno-csync-anomaly |
| -@opindex mno-csync-anomaly |
| -Don't generate extra code to prevent CSYNC or SSYNC instructions from |
| -occurring too soon after a conditional branch. |
| - |
| -@item -mlow-64k |
| -@opindex mlow-64k |
| -When enabled, the compiler is free to take advantage of the knowledge that |
| -the entire program fits into the low 64k of memory. |
| - |
| -@item -mno-low-64k |
| -@opindex mno-low-64k |
| -Assume that the program is arbitrarily large. This is the default. |
| +@item -mhard-float |
| +@opindex mhard-float |
| +Use floating point coprocessor instructions. |
| |
| -@item -mid-shared-library |
| -@opindex mid-shared-library |
| -Generate code that supports shared libraries via the library ID method. |
| -This allows for execute in place and shared libraries in an environment |
| -without virtual memory management. This option implies @option{-fPIC}. |
| +@item -msoft-float |
| +@opindex msoft-float |
| +Use software floating-point library for floating-point operations. |
| |
| -@item -mno-id-shared-library |
| -@opindex mno-id-shared-library |
| -Generate code that doesn't assume ID based shared libraries are being used. |
| -This is the default. |
| +@item -mforce-double-align |
| +@opindex mforce-double-align |
| +Force double-word alignment for double-word memory accesses. |
| + |
| +@item -mno-init-got |
| +@opindex mno-init-got |
| +Do not initialize the GOT register before using it when compiling PIC |
| +code. |
| |
| -@item -mshared-library-id=n |
| -@opindex mshared-library-id |
| -Specified the identification number of the ID based shared library being |
| -compiled. Specifying a value of 0 will generate more compact code, specifying |
| -other values will force the allocation of that number to the current |
| -library but is no more space or time efficient than omitting this option. |
| +@item -mrelax |
| +@opindex mrelax |
| +Let the invoked assembler and linker do relaxing. |
| +(Enabled by default when the optimization level is >1.) |
| +This means that when the addresses of symbols are known at link time, |
| +the linker can optimize @samp{icall} and @samp{mcall} |
| +instructions into an @samp{rcall} instruction if possible. |
| +Loading the address of a symbol can also be optimized. |
| + |
| +@item -mmd-reorg-opt |
| +@opindex mmd-reorg-opt |
| +Perform machine-dependent optimizations in the reorg stage. |
| + |
| +@item -masm-addr-pseudos |
| +@opindex masm-addr-pseudos |
| +Use the assembler pseudo-instructions @samp{lda.w} and @samp{call} for handling direct |
| +addresses. (Enabled by default.) |
| + |
| +@item -mpart=@var{part} |
| +@opindex mpart |
| +Generate code for the specified part. Permissible parts are: |
| +@samp{ap7000}, @samp{ap7010}, @samp{ap7020}, |
| +@samp{uc3a0128}, @samp{uc3a0256}, @samp{uc3a0512}, |
| +@samp{uc3a1128}, @samp{uc3a1256}, @samp{uc3a1512}, |
| +@samp{uc3b064}, @samp{uc3b0128}, @samp{uc3b0256}, |
| +@samp{uc3b164}, @samp{uc3b1128}, @samp{uc3b1256}. |
| |
| -@item -mlong-calls |
| -@itemx -mno-long-calls |
| -@opindex mlong-calls |
| -@opindex mno-long-calls |
| -Tells the compiler to perform function calls by first loading the |
| -address of the function into a register and then performing a subroutine |
| -call on this register. This switch is needed if the target function |
| -will lie outside of the 24 bit addressing range of the offset based |
| -version of subroutine call instruction. |
| +@item -mcpu=@var{cpu-type} |
| +@opindex mcpu |
| +Same as -mpart. Obsolete. |
| + |
| +@item -march=@var{arch} |
| +@opindex march |
| +Generate code for the specified architecture. Permissible architectures are: |
| +@samp{ap} and @samp{uc}. |
| + |
| +@item -mfast-float |
| +@opindex mfast-float |
| +Enable the fast floating-point library, which does not conform to IEEE but is still good enough |
| +for most applications. The fast floating-point library does not round to nearest even |
| +but away from zero. Enabled by default if the @option{-funsafe-math-optimizations} switch is specified. |
| + |
| +@item -mimm-in-const-pool |
| +@opindex mimm-in-const-pool |
| +Put large immediates in the constant pool. This is enabled by default for architectures with an instruction cache. |
| |
| -This feature is not enabled by default. Specifying |
| -@option{-mno-long-calls} will restore the default behavior. Note these |
| -switches have no effect on how the compiler generates code to handle |
| -function calls via function pointers. |
| @end table |
| |
| @node CRIS Options |
| --- a/gcc/doc/md.texi |
| +++ b/gcc/doc/md.texi |
| @@ -1681,6 +1681,80 @@ A memory reference suitable for iWMMXt l |
| A memory reference suitable for the ARMv4 ldrsb instruction. |
| @end table |
| |
| +@item AVR32 family---@file{avr32.h} |
| +@table @code |
| +@item f |
| +Floating-point registers (f0 to f15) (Reserved for future use) |
| + |
| +@item Ku@var{bits} |
| +Unsigned constant representable with @var{bits} number of bits (must be |
| +two digits). For example, an unsigned 8-bit constant is written as @samp{Ku08}. |
| + |
| +@item Ks@var{bits} |
| +Signed constant representable with @var{bits} number of bits (must be |
| +two digits). For example, a signed 12-bit constant is written as @samp{Ks12}. |
| + |
| +@item Is@var{bits} |
| +The negated range of a signed constant representable with @var{bits} |
| +number of bits. The same as @samp{Ks@var{bits}} with a negated range. |
| +This means that the constant must be in the range @math{-2^{bits-1}+1} to @math{2^{bits-1}}. |
| + |
| +@item G |
| +A single/double precision floating-point immediate or 64-bit integer |
| +immediate where the least and most significant words both can be |
| +loaded with a move instruction. That is, the integer forms of the |
| +values in the least and most significant words are both in the range |
| +@math{-2^{20}} to @math{2^{20}-1}. |
| + |
| +@item M |
| +Any 32-bit immediate with the most significant bits set to zero and the |
| +remaining least significant bits set to one. |
| + |
| +@item J |
| +A 32-bit immediate where all the lower 16-bits are zero. |
| + |
| +@item O |
| +A 32-bit immediate with one bit set and the rest of the bits cleared. |
| + |
| +@item N |
| +A 32-bit immediate with one bit cleared and the rest of the bits set. |
| + |
| +@item L |
| +A 32-bit immediate where all the lower 16-bits are set. |
| + |
| +@item Q |
| +Any AVR32 memory reference except for references used for the atomic memory (RMW) instructions. |
| + |
| +@item RKs@var{bits} |
| +A memory reference where the address consists of a base register |
| +plus a signed immediate displacement with range given by @samp{Ks@var{bits}} |
| +which has the same format as for the signed immediate integer constraint |
| +given above. |
| + |
| +@item RKu@var{bits} |
| +A memory reference where the address consists of a base register |
| +plus an unsigned immediate displacement with range given by @samp{Ku@var{bits}} |
| +which has the same format as for the unsigned immediate integer constraint |
| +given above. |
| + |
| +@item S |
| +A memory reference with an immediate or register offset |
| + |
| +@item T |
| +A memory reference to a constant pool entry |
| + |
| +@item W |
| +A valid operand for use in the @samp{lda.w} instruction macro when |
| +relaxing is enabled |
| + |
| +@item Y |
| +A memory reference suitable for the atomic memory (RMW) instructions. |
| + |
| +@item Z |
| +A memory reference valid for coprocessor memory instructions |
| + |
| +@end table |
| + |
| @item AVR family---@file{config/avr/constraints.md} |
| @table @code |
| @item l |
| --- a/gcc/expmed.c |
| +++ b/gcc/expmed.c |
| @@ -36,6 +36,7 @@ along with GCC; see the file COPYING3. |
| #include "real.h" |
| #include "recog.h" |
| #include "langhooks.h" |
| +#include "target.h" |
| |
| static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT, |
| unsigned HOST_WIDE_INT, |
| @@ -454,9 +455,19 @@ store_bit_field (rtx str_rtx, unsigned H |
| ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD |
| || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode)) |
| && byte_offset % GET_MODE_SIZE (fieldmode) == 0) |
| - : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0)) |
| - || (offset * BITS_PER_UNIT % bitsize == 0 |
| - && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0)))) |
| + : ( |
| + |
| + /* NB! Added for AVR32, and I think this should be true for |
| + all targets not using narrow volatile bitfields. If the |
| + bitfield is volatile then we need to perform an access |
| + consistent with the container type. */ |
| + !(MEM_VOLATILE_P (op0) |
| + && GET_MODE_BITSIZE (GET_MODE (op0)) != bitsize |
| + && bitsize < BITS_PER_WORD |
| + && !targetm.narrow_volatile_bitfield ()) |
| + && (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0)) |
| + || (offset * BITS_PER_UNIT % bitsize == 0 |
| + && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))) |
| { |
| if (MEM_P (op0)) |
| op0 = adjust_address (op0, fieldmode, offset); |
| @@ -1256,6 +1267,13 @@ extract_bit_field (rtx str_rtx, unsigned |
| && GET_MODE_SIZE (mode1) != 0 |
| && byte_offset % GET_MODE_SIZE (mode1) == 0) |
| || (MEM_P (op0) |
| + /* NB! Added for AVR32, and I think this should be true for |
| + all targets not using narrow volatile bitfields. If the |
| + bitfield is volatile then we need to perform an access |
| + consistent with the container type. */ |
| + && !(MEM_VOLATILE_P (op0) |
| + && GET_MODE_BITSIZE (GET_MODE (op0)) != bitsize |
| + && !targetm.narrow_volatile_bitfield ()) |
| && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0)) |
| || (offset * BITS_PER_UNIT % bitsize == 0 |
| && MEM_ALIGN (op0) % bitsize == 0))))) |
| --- a/gcc/expr.c |
| +++ b/gcc/expr.c |
| @@ -3519,18 +3519,19 @@ emit_single_push_insn (enum machine_mode |
| } |
| else |
| { |
| + emit_move_insn (stack_pointer_rtx, |
| + expand_binop (Pmode, |
| #ifdef STACK_GROWS_DOWNWARD |
| - /* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC. */ |
| - dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx, |
| - GEN_INT (-(HOST_WIDE_INT) rounded_size)); |
| + sub_optab, |
| #else |
| - /* ??? This seems wrong if STACK_PUSH_CODE == POST_INC. */ |
| - dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx, |
| - GEN_INT (rounded_size)); |
| + add_optab, |
| #endif |
| - dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr); |
| + stack_pointer_rtx, |
| + GEN_INT (rounded_size), |
| + NULL_RTX, 0, OPTAB_LIB_WIDEN)); |
| + dest_addr = stack_pointer_rtx; |
| } |
| - |
| + |
| dest = gen_rtx_MEM (mode, dest_addr); |
| |
| if (type != 0) |
| @@ -5509,7 +5510,21 @@ store_field (rtx target, HOST_WIDE_INT b |
| is a bit field, we cannot use addressing to access it. |
| Use bit-field techniques or SUBREG to store in it. */ |
| |
| - if (mode == VOIDmode |
| + if ( |
| + /* NB! Added for AVR32, and I think this should be true for |
| + all targets not using narrow volatile bitfields. If the |
| + bitfield is volatile then we need to perform an access |
| + consistent with the container type. */ |
| + (MEM_P (target) |
| + && MEM_VOLATILE_P (target) |
| + && ((GET_MODE (target) != BLKmode |
| + && GET_MODE_BITSIZE (GET_MODE (target)) > bitsize ) |
| + /* If BLKmode, check if this is a record. Do not know |
| +                 if this is really necessary though...*/ |
| + || (GET_MODE (target) == BLKmode |
| + && TREE_CODE (type) == RECORD_TYPE)) |
| + && !targetm.narrow_volatile_bitfield ()) |
| + || mode == VOIDmode |
| || (mode != BLKmode && ! direct_store[(int) mode] |
| && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT |
| && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT) |
| @@ -7560,7 +7575,21 @@ expand_expr_real_1 (tree exp, rtx target |
| by doing the extract into an object as wide as the field |
| (which we know to be the width of a basic mode), then |
| storing into memory, and changing the mode to BLKmode. */ |
| - if (mode1 == VOIDmode |
| + if ( |
| + /* NB! Added for AVR32, and I think this should be true for |
| + all targets not using narrow volatile bitfields. If the |
| + bitfield is volatile then we need to perform an access |
| + consistent with the container type. */ |
| + (MEM_P (op0) |
| + && MEM_VOLATILE_P (op0) |
| + && ((GET_MODE (op0) != BLKmode |
| + && GET_MODE_BITSIZE (GET_MODE (op0)) > bitsize ) |
| + /* If BLKmode, check if this is a record. Do not know |
| +                 if this is really necessary though...*/ |
| + || (GET_MODE (op0) == BLKmode |
| + && TREE_CODE (type) == RECORD_TYPE)) |
| + && !targetm.narrow_volatile_bitfield ()) |
| + || mode1 == VOIDmode |
| || REG_P (op0) || GET_CODE (op0) == SUBREG |
| || (mode1 != BLKmode && ! direct_load[(int) mode1] |
| && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT |
| --- a/gcc/flow.c |
| +++ b/gcc/flow.c |
| @@ -3327,12 +3327,12 @@ not_reg_cond (rtx x) |
| if (GET_CODE (x) == NOT) |
| return XEXP (x, 0); |
| if (COMPARISON_P (x) |
| - && REG_P (XEXP (x, 0))) |
| + /* && REG_P (XEXP (x, 0))*/) |
| { |
| - gcc_assert (XEXP (x, 1) == const0_rtx); |
| + /*gcc_assert (XEXP (x, 1) == const0_rtx);*/ |
| |
| return gen_rtx_fmt_ee (reversed_comparison_code (x, NULL), |
| - VOIDmode, XEXP (x, 0), const0_rtx); |
| + VOIDmode, XEXP (x, 0), XEXP (x, 0) /*const0_rtx*/); |
| } |
| return gen_rtx_NOT (0, x); |
| } |
| --- a/gcc/function.c |
| +++ b/gcc/function.c |
| @@ -2676,8 +2676,12 @@ assign_parm_setup_reg (struct assign_par |
| SET_DECL_RTL (parm, parmreg); |
| |
| /* Copy the value into the register. */ |
| - if (data->nominal_mode != data->passed_mode |
| - || promoted_nominal_mode != data->promoted_mode) |
| + if ( (data->nominal_mode != data->passed_mode |
| + /* Added for AVR32: If passed_mode is equal |
| +            to the promoted nominal mode, why should we convert? |
| + The conversion should make no difference. */ |
| + && data->passed_mode != promoted_nominal_mode) |
| + || promoted_nominal_mode != data->promoted_mode) |
| { |
| int save_tree_used; |
| |
| --- a/gcc/genemit.c |
| +++ b/gcc/genemit.c |
| @@ -121,6 +121,24 @@ max_operand_vec (rtx insn, int arg) |
| } |
| |
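| +/* Emit the prologue used by the generated gen_* functions to fetch their |
| +   operands from a va_list.  For a three-operand pattern the emitted code |
| +   looks roughly like this: |
| + |
| +       rtx operand1 ATTRIBUTE_UNUSED; |
| +       rtx operand2 ATTRIBUTE_UNUSED; |
| +       va_list args; |
| + |
| +       va_start (args, operand0); |
| +       operand1 = va_arg (args, rtx); |
| +       operand2 = va_arg (args, rtx); |
| +       va_end (args);  */ |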
| static void |
| +gen_vararg_prologue(int operands) |
| +{ |
| + int i; |
| + |
| + if (operands > 1) |
| + { |
| + for (i = 1; i < operands; i++) |
| + printf(" rtx operand%d ATTRIBUTE_UNUSED;\n", i); |
| + |
| + printf(" va_list args;\n\n"); |
| + printf(" va_start(args, operand0);\n"); |
| + for (i = 1; i < operands; i++) |
| + printf(" operand%d = va_arg(args, rtx);\n", i); |
| + printf(" va_end(args);\n\n"); |
| + } |
| +} |
| + |
| +static void |
| print_code (RTX_CODE code) |
| { |
| const char *p1; |
| @@ -405,18 +423,16 @@ gen_insn (rtx insn, int lineno) |
| fatal ("match_dup operand number has no match_operand"); |
| |
| /* Output the function name and argument declarations. */ |
| - printf ("rtx\ngen_%s (", XSTR (insn, 0)); |
| + printf ("rtx\ngen_%s ", XSTR (insn, 0)); |
| + |
| if (operands) |
| - for (i = 0; i < operands; i++) |
| - if (i) |
| - printf (",\n\trtx operand%d ATTRIBUTE_UNUSED", i); |
| - else |
| - printf ("rtx operand%d ATTRIBUTE_UNUSED", i); |
| + printf("(rtx operand0 ATTRIBUTE_UNUSED, ...)\n"); |
| else |
| - printf ("void"); |
| - printf (")\n"); |
| + printf("(void)\n"); |
| printf ("{\n"); |
| |
| + gen_vararg_prologue(operands); |
| + |
| /* Output code to construct and return the rtl for the instruction body. */ |
| |
| if (XVECLEN (insn, 1) == 1) |
| @@ -456,16 +472,12 @@ gen_expand (rtx expand) |
| operands = max_operand_vec (expand, 1); |
| |
| /* Output the function name and argument declarations. */ |
| - printf ("rtx\ngen_%s (", XSTR (expand, 0)); |
| + printf ("rtx\ngen_%s ", XSTR (expand, 0)); |
| if (operands) |
| - for (i = 0; i < operands; i++) |
| - if (i) |
| - printf (",\n\trtx operand%d", i); |
| - else |
| - printf ("rtx operand%d", i); |
| + printf("(rtx operand0 ATTRIBUTE_UNUSED, ...)\n"); |
| else |
| - printf ("void"); |
| - printf (")\n"); |
| + printf("(void)\n"); |
| + |
| printf ("{\n"); |
| |
| /* If we don't have any C code to write, only one insn is being written, |
| @@ -475,6 +487,8 @@ gen_expand (rtx expand) |
| && operands > max_dup_opno |
| && XVECLEN (expand, 1) == 1) |
| { |
| + gen_vararg_prologue(operands); |
| + |
| printf (" return "); |
| gen_exp (XVECEXP (expand, 1, 0), DEFINE_EXPAND, NULL); |
| printf (";\n}\n\n"); |
| @@ -488,6 +502,7 @@ gen_expand (rtx expand) |
| for (; i <= max_scratch_opno; i++) |
| printf (" rtx operand%d ATTRIBUTE_UNUSED;\n", i); |
| printf (" rtx _val = 0;\n"); |
| + gen_vararg_prologue(operands); |
| printf (" start_sequence ();\n"); |
| |
| /* The fourth operand of DEFINE_EXPAND is some code to be executed |
| --- a/gcc/genflags.c |
| +++ b/gcc/genflags.c |
| @@ -127,7 +127,6 @@ static void |
| gen_proto (rtx insn) |
| { |
| int num = num_operands (insn); |
| - int i; |
| const char *name = XSTR (insn, 0); |
| int truth = maybe_eval_c_test (XSTR (insn, 2)); |
| |
| @@ -158,12 +157,7 @@ gen_proto (rtx insn) |
| if (num == 0) |
| fputs ("void", stdout); |
| else |
| - { |
| - for (i = 1; i < num; i++) |
| - fputs ("rtx, ", stdout); |
| - |
| - fputs ("rtx", stdout); |
| - } |
| + fputs("rtx, ...", stdout); |
| |
| puts (");"); |
| |
| @@ -173,12 +167,7 @@ gen_proto (rtx insn) |
| { |
| printf ("static inline rtx\ngen_%s", name); |
| if (num > 0) |
| - { |
| - putchar ('('); |
| - for (i = 0; i < num-1; i++) |
| - printf ("rtx ARG_UNUSED (%c), ", 'a' + i); |
| - printf ("rtx ARG_UNUSED (%c))\n", 'a' + i); |
| - } |
| + puts("(rtx ARG_UNUSED(a), ...)"); |
| else |
| puts ("(void)"); |
| puts ("{\n return 0;\n}"); |
| --- a/gcc/genoutput.c |
| +++ b/gcc/genoutput.c |
| @@ -386,7 +386,7 @@ output_insn_data (void) |
| } |
| |
| if (d->name && d->name[0] != '*') |
| - printf (" (insn_gen_fn) gen_%s,\n", d->name); |
| + printf (" gen_%s,\n", d->name); |
| else |
| printf (" 0,\n"); |
| |
| --- a/gcc/ifcvt.c |
| +++ b/gcc/ifcvt.c |
| @@ -77,7 +77,7 @@ static int num_possible_if_blocks; |
| static int num_updated_if_blocks; |
| |
| /* # of changes made which require life information to be updated. */ |
| -static int num_true_changes; |
| +int num_true_changes; |
| |
| /* Whether conditional execution changes were made. */ |
| static int cond_exec_changed_p; |
| @@ -287,12 +287,15 @@ cond_exec_process_insns (ce_if_block_t * |
| if (must_be_last) |
| return FALSE; |
| |
| - if (modified_in_p (test, insn)) |
| - { |
| - if (!mod_ok) |
| - return FALSE; |
| - must_be_last = TRUE; |
| - } |
| +#ifdef IFCVT_ALLOW_MODIFY_TEST_IN_INSN |
| + if ( !IFCVT_ALLOW_MODIFY_TEST_IN_INSN ) |
| +#endif |
| + if (modified_in_p (test, insn)) |
| + { |
| + if (!mod_ok) |
| + return FALSE; |
| + must_be_last = TRUE; |
| + } |
| |
| /* Now build the conditional form of the instruction. */ |
| pattern = PATTERN (insn); |
| @@ -566,16 +569,19 @@ cond_exec_process_if_block (ce_if_block_ |
| /* Do any machine dependent final modifications. */ |
| IFCVT_MODIFY_FINAL (ce_info); |
| #endif |
| - |
| - /* Conversion succeeded. */ |
| - if (dump_file) |
| - fprintf (dump_file, "%d insn%s converted to conditional execution.\n", |
| - n_insns, (n_insns == 1) ? " was" : "s were"); |
| - |
| + |
| /* Merge the blocks! */ |
| - merge_if_block (ce_info); |
| - cond_exec_changed_p = TRUE; |
| - return TRUE; |
| + if ( reload_completed ){ |
| + /* Conversion succeeded. */ |
| + if (dump_file) |
| + fprintf (dump_file, "%d insn%s converted to conditional execution.\n", |
| + n_insns, (n_insns == 1) ? " was" : "s were"); |
| + |
| + merge_if_block (ce_info); |
| + cond_exec_changed_p = TRUE; |
| + return TRUE; |
| + } |
| + return FALSE; |
| |
| fail: |
| #ifdef IFCVT_MODIFY_CANCEL |
| @@ -1050,7 +1056,11 @@ noce_try_addcc (struct noce_if_info *if_ |
| != UNKNOWN)) |
| { |
| rtx cond = if_info->cond; |
| - enum rtx_code code = reversed_comparison_code (cond, if_info->jump); |
| + /* This generates wrong code for AVR32. The cond code need not be reversed |
| + since the addmodecc patterns add if the condition is NOT met. */ |
| + /* enum rtx_code code = reversed_comparison_code (cond, if_info->jump);*/ |
| + enum rtx_code code = GET_CODE(cond); |
| + |
| |
| /* First try to use addcc pattern. */ |
| if (general_operand (XEXP (cond, 0), VOIDmode) |
| @@ -2651,7 +2661,12 @@ process_if_block (struct ce_if_block * c |
| && cond_move_process_if_block (ce_info)) |
| return TRUE; |
| |
| - if (HAVE_conditional_execution && reload_completed) |
| + if (HAVE_conditional_execution && |
| +#ifdef IFCVT_COND_EXEC_BEFORE_RELOAD |
| + (reload_completed || IFCVT_COND_EXEC_BEFORE_RELOAD)) |
| +#else |
| + reload_completed) |
| +#endif |
| { |
| /* If we have && and || tests, try to first handle combining the && and |
| || tests into the conditional code, and if that fails, go back and |
| @@ -4036,6 +4051,15 @@ rest_of_handle_if_after_reload (void) |
| cleanup_cfg (CLEANUP_EXPENSIVE |
| | CLEANUP_UPDATE_LIFE |
| | (flag_crossjumping ? CLEANUP_CROSSJUMP : 0)); |
| + |
| + /* Hack for the AVR32 experimental ifcvt processing before reload. |
| + The AVR32 specific ifcvt code needs to know when ifcvt after reload |
| + has begun. */ |
| +#ifdef IFCVT_COND_EXEC_BEFORE_RELOAD |
| + if ( IFCVT_COND_EXEC_BEFORE_RELOAD ) |
| + cfun->machine->ifcvt_after_reload = 1; |
| +#endif |
| + |
| if (flag_if_conversion2) |
| if_convert (1); |
| return 0; |
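| |
| The ifcvt.c changes above are keyed off two new target macros, |
| IFCVT_ALLOW_MODIFY_TEST_IN_INSN and IFCVT_COND_EXEC_BEFORE_RELOAD, which only |
| the AVR32 back end is expected to provide. A hedged sketch of what the |
| target-header definitions could look like (the values are assumptions for |
| illustration; the real avr32.h definitions are not shown in this excerpt): |
| |
| /* Hypothetical target macros in the back-end header (e.g. avr32.h). */ |
| #define IFCVT_ALLOW_MODIFY_TEST_IN_INSN 1 |
| #define IFCVT_COND_EXEC_BEFORE_RELOAD 1 |
| |
| In ifcvt.c the new code paths are wrapped in #ifdef, so targets that leave |
| these macros undefined keep the original behaviour. |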
| --- a/gcc/longlong.h |
| +++ b/gcc/longlong.h |
| @@ -226,6 +226,41 @@ UDItype __umulsidi3 (USItype, USItype); |
| #define UDIV_TIME 100 |
| #endif /* __arm__ */ |
| |
| +#if defined (__avr32__) && W_TYPE_SIZE == 32 |
| +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ |
| + __asm__ ("add\t%1, %4, %5\n\tadc\t%0, %2, %3" \ |
| + : "=r" ((USItype) (sh)), \ |
| + "=&r" ((USItype) (sl)) \ |
| + : "r" ((USItype) (ah)), \ |
| + "r" ((USItype) (bh)), \ |
| + "r" ((USItype) (al)), \ |
| + "r" ((USItype) (bl)) __CLOBBER_CC) |
| +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ |
| + __asm__ ("sub\t%1, %4, %5\n\tsbc\t%0, %2, %3" \ |
| + : "=r" ((USItype) (sh)), \ |
| + "=&r" ((USItype) (sl)) \ |
| + : "r" ((USItype) (ah)), \ |
| + "r" ((USItype) (bh)), \ |
| + "r" ((USItype) (al)), \ |
| + "r" ((USItype) (bl)) __CLOBBER_CC) |
| + |
| +#if !defined (__AVR32_UC__) || __AVR32_UC__ != 3 |
| +#define __umulsidi3(a,b) ((UDItype)(a) * (UDItype)(b)) |
| + |
| +#define umul_ppmm(w1, w0, u, v) \ |
| +{ \ |
| + DWunion __w; \ |
| + __w.ll = __umulsidi3 (u, v); \ |
| + w1 = __w.s.high; \ |
| + w0 = __w.s.low; \ |
| +} |
| +#endif |
| + |
| +#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clz (X)) |
| +#define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctz (X)) |
| +#define COUNT_LEADING_ZEROS_0 32 |
| +#endif |
| + |
| #if defined (__hppa) && W_TYPE_SIZE == 32 |
| #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ |
| __asm__ ("add %4,%5,%1\n\taddc %2,%3,%0" \ |
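| |
| The new __avr32__ block in longlong.h above maps the double-word helpers onto |
| AVR32 add/adc and sub/sbc instruction pairs and onto the clz/ctz builtins. A |
| small usage sketch of the macros defined there, in the style of libgcc code |
| (an illustration with made-up constants, not code from the patch): |
| |
| USItype hi, lo, zeros; |
| |
| /* 32x32 -> 64-bit multiply, split into high and low words. */ |
| umul_ppmm (hi, lo, (USItype) 0x12345678, (USItype) 0x9abcdef0); |
| |
| /* __builtin_clz is undefined for 0, hence the COUNT_LEADING_ZEROS_0 |
|    fallback defined above. */ |
| if (hi != 0) |
|   count_leading_zeros (zeros, hi); |
| else |
|   zeros = COUNT_LEADING_ZEROS_0; |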
| --- a/gcc/optabs.h |
| +++ b/gcc/optabs.h |
| @@ -431,7 +431,7 @@ extern enum insn_code reload_out_optab[N |
| extern GTY(()) optab code_to_optab[NUM_RTX_CODE + 1]; |
| |
| |
| -typedef rtx (*rtxfun) (rtx); |
| +typedef rtx (*rtxfun) (rtx, ...); |
| |
| /* Indexed by the rtx-code for a conditional (e.g. EQ, LT,...) |
| gives the gen_function to make a branch to test that condition. */ |
| --- a/gcc/regrename.c |
| +++ b/gcc/regrename.c |
| @@ -1592,6 +1592,9 @@ copyprop_hardreg_forward_1 (basic_block |
| bool changed = false; |
| rtx insn; |
| |
| + rtx prev_pred_test; |
| + int prev_pred_insn_skipped = 0; |
| + |
| for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn)) |
| { |
| int n_ops, i, alt, predicated; |
| @@ -1630,7 +1633,60 @@ copyprop_hardreg_forward_1 (basic_block |
| || (predicated && recog_data.operand_type[i] == OP_OUT)) |
| recog_data.operand_type[i] = OP_INOUT; |
| } |
| + |
| |
| + /* Added for targets (AVR32) which allow the test operands to be modified |
| + in a cond_exec instruction. For these targets we cannot make a change to |
| + the test operands if one of the test operands is an output operand. This is |
| + because changing the test operands might require inserting a new test |
| + insn in the middle of a sequence of cond_exec insns, and if the test |
| + operands are modified these tests will fail. |
| + */ |
| + |
| + if ( IFCVT_ALLOW_MODIFY_TEST_IN_INSN |
| + && predicated ) |
| + { |
| + int insn_skipped = 0; |
| + rtx test = COND_EXEC_TEST (PATTERN (insn)); |
| + |
| + /* Check if the previous insn was a skipped predicated insn with the same |
| + test as this predicated insn. If so, we cannot make any modification to |
| + this insn either since we cannot emit the test insn because the operands |
| + are clobbered. */ |
| + if ( prev_pred_insn_skipped |
| + && (rtx_equal_p (test, prev_pred_test) |
| + || rtx_equal_p (test, reversed_condition (prev_pred_test))) ) |
| + { |
| + insn_skipped = 1; |
| + } |
| + else |
| + { |
| + /* Check if the output operand is used in the test expression. */ |
| + for (i = 0; i < n_ops; ++i) |
| + if ( recog_data.operand_type[i] == OP_INOUT |
| + && reg_mentioned_p (recog_data.operand[i], test) ) |
| + { |
| + insn_skipped = 1; |
| + break; |
| + } |
| + |
| + } |
| + |
| + prev_pred_test = test; |
| + prev_pred_insn_skipped = insn_skipped; |
| + if ( insn_skipped ) |
| + { |
| + if (insn == BB_END (bb)) |
| + break; |
| + else |
| + continue; |
| + } |
| + } |
| + else |
| + { |
| + prev_pred_insn_skipped = 0; |
| + } |
| + |
| /* For each earlyclobber operand, zap the value data. */ |
| for (i = 0; i < n_ops; i++) |
| if (recog_op_alt[i][alt].earlyclobber) |
| --- a/gcc/reload.c |
| +++ b/gcc/reload.c |
| @@ -4574,7 +4574,7 @@ find_reloads_toplev (rtx x, int opnum, e |
| x = mem; |
| i = find_reloads_address (GET_MODE (x), &x, XEXP (x, 0), &XEXP (x, 0), |
| opnum, type, ind_levels, insn); |
| - if (x != mem) |
| + if (!rtx_equal_p (x, mem)) |
| push_reg_equiv_alt_mem (regno, x); |
| if (address_reloaded) |
| *address_reloaded = i; |
| --- a/gcc/sched-deps.c |
| +++ b/gcc/sched-deps.c |
| @@ -649,7 +649,14 @@ fixup_sched_groups (rtx insn) |
| |
| prev_nonnote = prev_nonnote_insn (insn); |
| if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote) |
| - && ! sched_insns_conditions_mutex_p (insn, prev_nonnote)) |
| + /* Modification for AVR32 by RP: Why is this here? This will |
| + cause the instruction to be without any dependencies, which might |
| + cause it to be moved anywhere. For the AVR32 we try to keep |
| + a group of conditionals together even if they are mutually exclusive. |
| + */ |
| + && (! sched_insns_conditions_mutex_p (insn, prev_nonnote) |
| + || GET_CODE (PATTERN (insn)) == COND_EXEC ) |
| + ) |
| add_dependence (insn, prev_nonnote, REG_DEP_ANTI); |
| } |
| |
| @@ -1123,8 +1130,29 @@ sched_analyze_insn (struct deps *deps, r |
| |
| if (code == COND_EXEC) |
| { |
| +#ifdef IFCVT_ALLOW_MODIFY_TEST_IN_INSN |
| + if (IFCVT_ALLOW_MODIFY_TEST_IN_INSN) |
| + { |
| + /* Check if we have a group of conditional instructions with the same test. |
| + If so we must make sure that they are not scheduled apart, in order to |
| + avoid unnecessary tests; if one of the registers in the test is modified |
| + in the instruction, this is also needed to ensure correct code. */ |
| + if ( prev_nonnote_insn (insn) |
| + && INSN_P (prev_nonnote_insn (insn)) |
| + && GET_CODE (PATTERN (prev_nonnote_insn (insn))) == COND_EXEC |
| + && rtx_equal_p (XEXP(COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn))), 0), XEXP (COND_EXEC_TEST (x), 0)) |
| + && rtx_equal_p (XEXP(COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn))), 1), XEXP (COND_EXEC_TEST (x), 1)) |
| + && ( GET_CODE (COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn)))) == GET_CODE (COND_EXEC_TEST (x)) |
| + || GET_CODE (COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn)))) == reversed_comparison_code (COND_EXEC_TEST (x), insn))) |
| + { |
| + SCHED_GROUP_P (insn) = 1; |
| + /* CANT_MOVE (prev_nonnote_insn (insn)) = 1; */ |
| + } |
| + } |
| +#endif |
| sched_analyze_2 (deps, COND_EXEC_TEST (x), insn); |
| |
| + |
| /* ??? Should be recording conditions so we reduce the number of |
| false dependencies. */ |
| x = COND_EXEC_CODE (x); |
| --- a/gcc/testsuite/gcc.dg/sibcall-3.c |
| +++ b/gcc/testsuite/gcc.dg/sibcall-3.c |
| @@ -5,7 +5,7 @@ |
| Copyright (C) 2002 Free Software Foundation Inc. |
| Contributed by Hans-Peter Nilsson <hp@bitrange.com> */ |
| |
| -/* { dg-do run { xfail arc-*-* avr-*-* c4x-*-* cris-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* m681?-*-* m680*-*-* m68k-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa-*-* } } */ |
| +/* { dg-do run { xfail arc-*-* avr-*-* avr32-*-* c4x-*-* cris-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* m681?-*-* m680*-*-* m68k-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa-*-* } } */ |
| /* { dg-options "-O2 -foptimize-sibling-calls" } */ |
| |
| /* The option -foptimize-sibling-calls is the default, but serves as |
| --- a/gcc/testsuite/gcc.dg/sibcall-4.c |
| +++ b/gcc/testsuite/gcc.dg/sibcall-4.c |
| @@ -5,7 +5,7 @@ |
| Copyright (C) 2002 Free Software Foundation Inc. |
| Contributed by Hans-Peter Nilsson <hp@bitrange.com> */ |
| |
| -/* { dg-do run { xfail arc-*-* avr-*-* c4x-*-* cris-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* m681?-*-* m680*-*-* m68k-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa-*-* } } */ |
| +/* { dg-do run { xfail arc-*-* avr-*-* avr32-*-* c4x-*-* cris-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* m681?-*-* m680*-*-* m68k-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa-*-* } } */ |
| /* { dg-options "-O2 -foptimize-sibling-calls" } */ |
| |
| /* The option -foptimize-sibling-calls is the default, but serves as |
| --- a/gcc/testsuite/gcc.dg/trampoline-1.c |
| +++ b/gcc/testsuite/gcc.dg/trampoline-1.c |
| @@ -46,6 +46,8 @@ void foo (void) |
| |
| int main (void) |
| { |
| +#ifndef NO_TRAMPOLINES |
| foo (); |
| +#endif |
| return 0; |
| } |
| --- a/gcc/testsuite/g++.old-deja/g++.pt/static11.C |
| +++ b/gcc/testsuite/g++.old-deja/g++.pt/static11.C |
| @@ -2,7 +2,7 @@ |
| // in their dejagnu baseboard description) require that the status is |
| // final when exit is entered (or main returns), and not "overruled" by a |
| // destructor calling _exit. It's not really worth it to handle that. |
| -// { dg-do run { xfail mmix-knuth-mmixware xtensa-*-elf* arm*-*-elf arm*-*-eabi m68k-*-elf } } |
| +// { dg-do run { xfail mmix-knuth-mmixware xtensa-*-elf* avr32-*-elf arm*-*-elf arm*-*-eabi m68k-*-elf } } |
| |
| // Bug: g++ was failing to destroy C<int>::a because it was using two |
| // different sentry variables for construction and destruction. |
| --- a/gcc/version.c |
| +++ b/gcc/version.c |
| @@ -8,7 +8,7 @@ |
| in parentheses. You may also wish to include a number indicating |
| the revision of your modified compiler. */ |
| |
| -#define VERSUFFIX "" |
| +#define VERSUFFIX "-atmel.1.1.3.avr32linux.1" |
| |
| /* This is the location of the online document giving instructions for |
| reporting bugs. If you distribute a modified version of GCC, |
| @@ -17,9 +17,9 @@ |
| forward us bugs reported to you, if you determine that they are |
| not bugs in your modifications.) */ |
| |
| -const char bug_report_url[] = "<URL:http://gcc.gnu.org/bugs.html>"; |
| +const char bug_report_url[] = "<URL:http://www.atmel.com/avr32/>"; |
| |
| /* The complete version string, assembled from several pieces. |
| BASEVER, DATESTAMP, and DEVPHASE are defined by the Makefile. */ |
| |
| -const char version_string[] = BASEVER DATESTAMP DEVPHASE VERSUFFIX; |
| +const char version_string[] = BASEVER VERSUFFIX DATESTAMP DEVPHASE; |
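| |
| The version.c reordering above attaches the Atmel suffix directly to the base |
| version number instead of appending it after the date stamp and phase. With |
| assumed values (BASEVER "4.2.1", DATESTAMP " 20070101", DEVPHASE " (prerelease)" |
| are placeholders, not taken from this patch), the string printed by |
| gcc --version changes roughly as follows: |
| |
| /* Before: "4.2.1 20070101 (prerelease)-atmel.1.1.3.avr32linux.1" |
|    After:  "4.2.1-atmel.1.1.3.avr32linux.1 20070101 (prerelease)" */ |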
| --- a/libstdc++-v3/acinclude.m4 |
| +++ b/libstdc++-v3/acinclude.m4 |
| @@ -125,15 +125,6 @@ AC_DEFUN([GLIBCXX_CONFIGURE], [ |
| ## other macros from doing the same. This should be automated.) -pme |
| need_libmath=no |
| |
| - # Check for uClibc since Linux platforms use different configuration |
| - # directories depending on the C library in use. |
| - AC_EGREP_CPP([_using_uclibc], [ |
| - #include <stdio.h> |
| - #if __UCLIBC__ |
| - _using_uclibc |
| - #endif |
| - ], uclibc=yes, uclibc=no) |
| - |
| # Find platform-specific directories containing configuration info. |
| # Also possibly modify flags used elsewhere, as needed by the platform. |
| GLIBCXX_CHECK_HOST |
| @@ -1389,8 +1380,8 @@ AC_DEFUN([GLIBCXX_ENABLE_CLOCALE], [ |
| #endif |
| int main() |
| { |
| - const char __one[] = "Äuglein Augmen"; |
| - const char __two[] = "Äuglein"; |
| + const char __one[] = "Ãuglein Augmen"; |
| + const char __two[] = "Ãuglein"; |
| int i; |
| int j; |
| __locale_t loc; |
| --- a/libstdc++-v3/config/os/gnu-linux/ctype_base.h |
| +++ b/libstdc++-v3/config/os/gnu-linux/ctype_base.h |
| @@ -31,6 +31,8 @@ |
| // |
| // ISO C++ 14882: 22.1 Locales |
| // |
| +#include <features.h> |
| +#include <ctype.h> |
| |
| /** @file ctype_base.h |
| * This is an internal header file, included by other library headers. |
| @@ -45,8 +47,12 @@ _GLIBCXX_BEGIN_NAMESPACE(std) |
| struct ctype_base |
| { |
| // Non-standard typedefs. |
| - typedef const int* __to_type; |
| - |
| +#ifdef __UCLIBC__ |
| + typedef const __ctype_touplow_t* __to_type; |
| +#else |
| + typedef const int* __to_type; |
| +#endif |
| + |
| // NB: Offsets into ctype<char>::_M_table force a particular size |
| // on the mask type. Because of this, we don't use an enum. |
| typedef unsigned short mask; |
| --- a/libstdc++-v3/include/Makefile.in |
| +++ b/libstdc++-v3/include/Makefile.in |
| @@ -36,6 +36,7 @@ POST_UNINSTALL = : |
| build_triplet = @build@ |
| host_triplet = @host@ |
| target_triplet = @target@ |
| +LIBOBJDIR = |
| DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ |
| $(top_srcdir)/fragment.am |
| subdir = include |
| --- a/libstdc++-v3/libmath/Makefile.in |
| +++ b/libstdc++-v3/libmath/Makefile.in |
| @@ -37,6 +37,7 @@ POST_UNINSTALL = : |
| build_triplet = @build@ |
| host_triplet = @host@ |
| target_triplet = @target@ |
| +LIBOBJDIR = |
| subdir = libmath |
| DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in |
| ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 |
| --- a/libstdc++-v3/libsupc++/Makefile.in |
| +++ b/libstdc++-v3/libsupc++/Makefile.in |
| @@ -38,6 +38,7 @@ POST_UNINSTALL = : |
| build_triplet = @build@ |
| host_triplet = @host@ |
| target_triplet = @target@ |
| +LIBOBJDIR = |
| DIST_COMMON = $(glibcxxinstall_HEADERS) $(srcdir)/Makefile.am \ |
| $(srcdir)/Makefile.in $(top_srcdir)/fragment.am |
| subdir = libsupc++ |
| --- a/libstdc++-v3/Makefile.in |
| +++ b/libstdc++-v3/Makefile.in |
| @@ -36,6 +36,7 @@ POST_UNINSTALL = : |
| build_triplet = @build@ |
| host_triplet = @host@ |
| target_triplet = @target@ |
| +LIBOBJDIR = |
| DIST_COMMON = README $(am__configure_deps) $(srcdir)/../config.guess \ |
| $(srcdir)/../config.sub $(srcdir)/../install-sh \ |
| $(srcdir)/../ltmain.sh $(srcdir)/../missing \ |
| --- a/libstdc++-v3/po/Makefile.in |
| +++ b/libstdc++-v3/po/Makefile.in |
| @@ -36,6 +36,7 @@ POST_UNINSTALL = : |
| build_triplet = @build@ |
| host_triplet = @host@ |
| target_triplet = @target@ |
| +LIBOBJDIR = |
| DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ |
| $(top_srcdir)/fragment.am |
| subdir = po |
| --- a/libstdc++-v3/src/Makefile.in |
| +++ b/libstdc++-v3/src/Makefile.in |
| @@ -36,6 +36,7 @@ POST_UNINSTALL = : |
| build_triplet = @build@ |
| host_triplet = @host@ |
| target_triplet = @target@ |
| +LIBOBJDIR = |
| DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ |
| $(top_srcdir)/fragment.am |
| subdir = src |
| --- a/Makefile.def |
| +++ b/Makefile.def |
| @@ -481,7 +481,7 @@ lang_env_dependencies = { module=rda; }; |
| lang_env_dependencies = { module=winsup; }; |
| lang_env_dependencies = { module=qthreads; }; |
| |
| -dependencies = { module=all-target-libgloss; on=configure-target-newlib; }; |
| +dependencies = { module=all-target-libgloss; on=all-target-newlib; }; |
| dependencies = { module=all-target-winsup; on=all-target-libiberty; }; |
| dependencies = { module=all-target-winsup; on=all-target-libtermcap; }; |
| |
| --- a/Makefile.in |
| +++ b/Makefile.in |
| @@ -43791,7 +43791,7 @@ all-target-libobjc: maybe-all-target-boe |
| all-target-libstdc++-v3: maybe-all-target-libiberty |
| install-target-libssp: maybe-install-gcc |
| install-target-libgomp: maybe-install-gcc |
| -all-target-libgloss: maybe-configure-target-newlib |
| +all-target-libgloss: maybe-all-target-newlib |
| all-target-winsup: maybe-all-target-libiberty |
| all-target-winsup: maybe-all-target-libtermcap |
| |
| --- a/gcc/configure.ac |
| +++ b/gcc/configure.ac |
| @@ -2158,7 +2158,7 @@ L2:], |
| as_ver=`$gcc_cv_as --version 2>/dev/null | sed 1q` |
| if echo "$as_ver" | grep GNU > /dev/null; then |
| changequote(,)dnl |
| - as_ver=`echo $as_ver | sed -e 's/GNU assembler \([0-9.][0-9.]*\).*/\1/'` |
| + as_ver=`echo $as_ver | sed -e 's/GNU assembler\( (GNU Binutils)\)\? \([0-9.][0-9.]*\).*/\2/'` |
| as_major=`echo $as_ver | sed 's/\..*//'` |
| as_minor=`echo $as_ver | sed 's/[^.]*\.\([0-9]*\).*/\1/'` |
| changequote([,])dnl |
| @@ -2971,7 +2971,7 @@ esac |
| case "$target" in |
| i?86*-*-* | mips*-*-* | alpha*-*-* | powerpc*-*-* | sparc*-*-* | m68*-*-* \ |
| | x86_64*-*-* | hppa*-*-* | arm*-*-* | strongarm*-*-* | xscale*-*-* \ |
| - | xstormy16*-*-* | cris-*-* | xtensa-*-* | bfin-*-* | score*-*-*) |
| + | xstormy16*-*-* | cris-*-* | xtensa-*-* | bfin-*-* | score*-*-* | avr32-*-*) |
| insn="nop" |
| ;; |
| ia64*-*-* | s390*-*-*) |
| --- a/gcc/configure |
| +++ b/gcc/configure |
| @@ -14023,7 +14023,7 @@ L2:' > conftest.s |
| # arbitrary sections are supported and try the test. |
| as_ver=`$gcc_cv_as --version 2>/dev/null | sed 1q` |
| if echo "$as_ver" | grep GNU > /dev/null; then |
| - as_ver=`echo $as_ver | sed -e 's/GNU assembler \([0-9.][0-9.]*\).*/\1/'` |
| + as_ver=`echo $as_ver | sed -e 's/GNU assembler\( (GNU Binutils)\)\? \([0-9.][0-9.]*\).*/\2/'` |
| as_major=`echo $as_ver | sed 's/\..*//'` |
| as_minor=`echo $as_ver | sed 's/[^.]*\.\([0-9]*\).*/\1/'` |
| if test $as_major -eq 2 && test $as_minor -lt 11 |
| @@ -15610,7 +15610,7 @@ esac |
| case "$target" in |
| i?86*-*-* | mips*-*-* | alpha*-*-* | powerpc*-*-* | sparc*-*-* | m68*-*-* \ |
| | x86_64*-*-* | hppa*-*-* | arm*-*-* | strongarm*-*-* | xscale*-*-* \ |
| - | xstormy16*-*-* | cris-*-* | xtensa-*-* | bfin-*-* | score*-*-*) |
| + | xstormy16*-*-* | cris-*-* | xtensa-*-* | bfin-*-* | score*-*-* | avr32-*-*) |
| insn="nop" |
| ;; |
| ia64*-*-* | s390*-*-*) |