1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2 
3    Copyright (C) 2009-2016 Free Software Foundation, Inc.
4    Contributed by ARM Ltd.
5 
6    This file is part of GAS.
7 
8    GAS is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation; either version 3 of the license, or
11    (at your option) any later version.
12 
13    GAS is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17 
18    You should have received a copy of the GNU General Public License
19    along with this program; see the file COPYING3. If not,
20    see <http://www.gnu.org/licenses/>.  */
21 
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define	 NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30 
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35 
36 #include "dwarf2dbg.h"
37 
38 /* Types of processor to assemble for.  */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42 
43 #define streq(a, b)	      (strcmp (a, b) == 0)
44 
45 #define END_OF_INSN '\0'
46 
47 static aarch64_feature_set cpu_variant;
48 
49 /* Variables that we set while parsing command-line options.  Once all
50    options have been read we re-process these values to set the real
51    assembly flags.  */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54 
55 /* Constants for known architecture features.  */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57 
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
60 static symbolS *GOT_symbol;
61 
62 /* Which ABI to use.  */
63 enum aarch64_abi_type
64 {
65   AARCH64_ABI_LP64 = 0,
66   AARCH64_ABI_ILP32 = 1
67 };
68 
69 /* AArch64 ABI for the output file.  */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71 
72 /* When non-zero, program to a 32-bit model, in which the C data types
73    int, long and all pointer types are 32-bit objects (ILP32); or to a
74    64-bit model, in which the C int type is 32-bits but the C long type
75    and all pointer types are 64-bit objects (LP64).  */
76 #define ilp32_p		(aarch64_abi == AARCH64_ABI_ILP32)
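
/* Note added for clarity (not in the original source): the ABI is normally
   selected on the command line, e.g. with -mabi=ilp32 or -mabi=lp64; the
   option handling itself lives further down in this file.  */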
77 #endif
78 
79 enum neon_el_type
80 {
81   NT_invtype = -1,
82   NT_b,
83   NT_h,
84   NT_s,
85   NT_d,
86   NT_q
87 };
88 
89 /* Bits for DEFINED field in neon_type_el.  */
90 #define NTA_HASTYPE  1
91 #define NTA_HASINDEX 2
92 
93 struct neon_type_el
94 {
95   enum neon_el_type type;
96   unsigned char defined;
97   unsigned width;
98   int64_t index;
99 };
100 
101 #define FIXUP_F_HAS_EXPLICIT_SHIFT	0x00000001
102 
103 struct reloc
104 {
105   bfd_reloc_code_real_type type;
106   expressionS exp;
107   int pc_rel;
108   enum aarch64_opnd opnd;
109   uint32_t flags;
110   unsigned need_libopcodes_p : 1;
111 };
112 
113 struct aarch64_instruction
114 {
115   /* libopcodes structure for instruction intermediate representation.  */
116   aarch64_inst base;
117   /* Record assembly errors found during the parsing.  */
118   struct
119     {
120       enum aarch64_operand_error_kind kind;
121       const char *error;
122     } parsing_error;
123   /* The condition that appears in the assembly line.  */
124   int cond;
125   /* Relocation information (including the GAS internal fixup).  */
126   struct reloc reloc;
127   /* Need to generate an immediate in the literal pool.  */
128   unsigned gen_lit_pool : 1;
129 };
130 
131 typedef struct aarch64_instruction aarch64_instruction;
132 
133 static aarch64_instruction inst;
134 
135 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
136 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
137 
138 /* Diagnostics inline function utilities.
139 
140    These are lightweight utilities which should only be called by parse_operands
141    and other parsers.  GAS processes each assembly line by parsing it against
142    instruction template(s); in the case of multiple templates (for the same
143    mnemonic name), those templates are tried one by one until one succeeds or
144    all fail.  An assembly line may fail a few templates before being
145    successfully parsed; an error saved here in most cases is not a user error
146    but an error indicating the current template is not the right template.
147    Therefore it is very important that errors can be saved at a low cost during
148    the parsing; we don't want to slow down the whole parsing by recording
149    non-user errors in detail.
150 
151    Remember that the objective is to help GAS pick up the most appropriate
152    error message in the case of multiple templates, e.g. FMOV which has 8
153    templates.  */
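
/* A minimal sketch of how the helpers below are typically used (illustrative,
   not part of the original comment): a sub-parser that cannot match the
   current template records a cheap error and bails out, e.g.

       set_default_error ();
       return PARSE_FAIL;

   and the recorded error is only reported to the user if every template for
   the mnemonic fails.  */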
154 
155 static inline void
156 clear_error (void)
157 {
158   inst.parsing_error.kind = AARCH64_OPDE_NIL;
159   inst.parsing_error.error = NULL;
160 }
161 
162 static inline bfd_boolean
163 error_p (void)
164 {
165   return inst.parsing_error.kind != AARCH64_OPDE_NIL;
166 }
167 
168 static inline const char *
169 get_error_message (void)
170 {
171   return inst.parsing_error.error;
172 }
173 
174 static inline enum aarch64_operand_error_kind
175 get_error_kind (void)
176 {
177   return inst.parsing_error.kind;
178 }
179 
180 static inline void
181 set_error (enum aarch64_operand_error_kind kind, const char *error)
182 {
183   inst.parsing_error.kind = kind;
184   inst.parsing_error.error = error;
185 }
186 
187 static inline void
188 set_recoverable_error (const char *error)
189 {
190   set_error (AARCH64_OPDE_RECOVERABLE, error);
191 }
192 
193 /* Use the DESC field of the corresponding aarch64_operand entry to compose
194    the error message.  */
195 static inline void
196 set_default_error (void)
197 {
198   set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
199 }
200 
201 static inline void
202 set_syntax_error (const char *error)
203 {
204   set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
205 }
206 
207 static inline void
208 set_first_syntax_error (const char *error)
209 {
210   if (! error_p ())
211     set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
212 }
213 
214 static inline void
215 set_fatal_syntax_error (const char *error)
216 {
217   set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
218 }
219 
220 /* Number of littlenums required to hold an extended precision number.  */
221 #define MAX_LITTLENUMS 6
222 
223 /* Return value for certain parsers when the parsing fails; those parsers
224    return the information of the parsed result, e.g. register number, on
225    success.  */
226 #define PARSE_FAIL -1
227 
228 /* This is an invalid condition code that means no conditional field is
229    present. */
230 #define COND_ALWAYS 0x10
231 
232 typedef struct
233 {
234   const char *template;
235   unsigned long value;
236 } asm_barrier_opt;
237 
238 typedef struct
239 {
240   const char *template;
241   uint32_t value;
242 } asm_nzcv;
243 
244 struct reloc_entry
245 {
246   char *name;
247   bfd_reloc_code_real_type reloc;
248 };
249 
250 /* Macros to define the register types and masks for the purpose
251    of parsing.  */
252 
253 #undef AARCH64_REG_TYPES
254 #define AARCH64_REG_TYPES	\
255   BASIC_REG_TYPE(R_32)	/* w[0-30] */	\
256   BASIC_REG_TYPE(R_64)	/* x[0-30] */	\
257   BASIC_REG_TYPE(SP_32)	/* wsp     */	\
258   BASIC_REG_TYPE(SP_64)	/* sp      */	\
259   BASIC_REG_TYPE(Z_32)	/* wzr     */	\
260   BASIC_REG_TYPE(Z_64)	/* xzr     */	\
261   BASIC_REG_TYPE(FP_B)	/* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
262   BASIC_REG_TYPE(FP_H)	/* h[0-31] */	\
263   BASIC_REG_TYPE(FP_S)	/* s[0-31] */	\
264   BASIC_REG_TYPE(FP_D)	/* d[0-31] */	\
265   BASIC_REG_TYPE(FP_Q)	/* q[0-31] */	\
266   BASIC_REG_TYPE(CN)	/* c[0-7]  */	\
267   BASIC_REG_TYPE(VN)	/* v[0-31] */	\
268   /* Typecheck: any 64-bit int reg         (inc SP exc XZR) */		\
269   MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64))		\
270   /* Typecheck: any int                    (inc {W}SP inc [WX]ZR) */	\
271   MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64)		\
272 		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
273 		 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) 			\
274   /* Typecheck: any [BHSDQ]P FP.  */					\
275   MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
276 		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
277   /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR)  */	\
278   MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64)		\
279 		 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN)	\
280 		 | REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
281 		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
282   /* Any integer register; used for error messages only.  */		\
283   MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64)			\
284 		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
285 		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
286   /* Pseudo type to mark the end of the enumerator sequence.  */	\
287   BASIC_REG_TYPE(MAX)
288 
289 #undef BASIC_REG_TYPE
290 #define BASIC_REG_TYPE(T)	REG_TYPE_##T,
291 #undef MULTI_REG_TYPE
292 #define MULTI_REG_TYPE(T,V)	BASIC_REG_TYPE(T)
293 
294 /* Register type enumerators.  */
295 typedef enum aarch64_reg_type_
296 {
297   /* A list of REG_TYPE_*.  */
298   AARCH64_REG_TYPES
299 } aarch64_reg_type;
300 
301 #undef BASIC_REG_TYPE
302 #define BASIC_REG_TYPE(T)	1 << REG_TYPE_##T,
303 #undef REG_TYPE
304 #define REG_TYPE(T)		(1 << REG_TYPE_##T)
305 #undef MULTI_REG_TYPE
306 #define MULTI_REG_TYPE(T,V)	V,
307 
308 /* Structure for a hash table entry for a register.  */
309 typedef struct
310 {
311   const char *name;
312   unsigned char number;
313   ENUM_BITFIELD (aarch64_reg_type_) type : 8;
314   unsigned char builtin;
315 } reg_entry;
316 
317 /* Values indexed by aarch64_reg_type to assist the type checking.  */
318 static const unsigned reg_type_masks[] =
319 {
320   AARCH64_REG_TYPES
321 };
322 
323 #undef BASIC_REG_TYPE
324 #undef REG_TYPE
325 #undef MULTI_REG_TYPE
326 #undef AARCH64_REG_TYPES
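
/* Worked example of the X-macro expansions above (added for clarity):
   BASIC_REG_TYPE(R_32) contributes REG_TYPE_R_32 to the enum and
   (1 << REG_TYPE_R_32) to reg_type_masks, while MULTI_REG_TYPE(R64_SP, ...)
   contributes REG_TYPE_R64_SP to the enum and the OR of its component
   REG_TYPE() masks to reg_type_masks; aarch64_check_reg_type relies on the
   latter when deciding whether a parsed register is acceptable.  */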
327 
328 /* Diagnostics used when we don't get a register of the expected type.
329    Note:  this has to be synchronized with aarch64_reg_type definitions
330    above.  */
331 static const char *
332 get_reg_expected_msg (aarch64_reg_type reg_type)
333 {
334   const char *msg;
335 
336   switch (reg_type)
337     {
338     case REG_TYPE_R_32:
339       msg = N_("integer 32-bit register expected");
340       break;
341     case REG_TYPE_R_64:
342       msg = N_("integer 64-bit register expected");
343       break;
344     case REG_TYPE_R_N:
345       msg = N_("integer register expected");
346       break;
347     case REG_TYPE_R_Z_SP:
348       msg = N_("integer, zero or SP register expected");
349       break;
350     case REG_TYPE_FP_B:
351       msg = N_("8-bit SIMD scalar register expected");
352       break;
353     case REG_TYPE_FP_H:
354       msg = N_("16-bit SIMD scalar or floating-point half precision "
355 	       "register expected");
356       break;
357     case REG_TYPE_FP_S:
358       msg = N_("32-bit SIMD scalar or floating-point single precision "
359 	       "register expected");
360       break;
361     case REG_TYPE_FP_D:
362       msg = N_("64-bit SIMD scalar or floating-point double precision "
363 	       "register expected");
364       break;
365     case REG_TYPE_FP_Q:
366       msg = N_("128-bit SIMD scalar or floating-point quad precision "
367 	       "register expected");
368       break;
369     case REG_TYPE_CN:
370       msg = N_("C0 - C15 expected");
371       break;
372     case REG_TYPE_R_Z_BHSDQ_V:
373       msg = N_("register expected");
374       break;
375     case REG_TYPE_BHSDQ:	/* any [BHSDQ]P FP  */
376       msg = N_("SIMD scalar or floating-point register expected");
377       break;
378     case REG_TYPE_VN:		/* any V reg  */
379       msg = N_("vector register expected");
380       break;
381     default:
382       as_fatal (_("invalid register type %d"), reg_type);
383     }
384   return msg;
385 }
386 
387 /* Some well known registers that we refer to directly elsewhere.  */
388 #define REG_SP	31
389 
390 /* Instructions take 4 bytes in the object file.  */
391 #define INSN_SIZE	4
392 
393 /* Define some common error messages.  */
394 #define BAD_SP          _("SP not allowed here")
395 
396 static struct hash_control *aarch64_ops_hsh;
397 static struct hash_control *aarch64_cond_hsh;
398 static struct hash_control *aarch64_shift_hsh;
399 static struct hash_control *aarch64_sys_regs_hsh;
400 static struct hash_control *aarch64_pstatefield_hsh;
401 static struct hash_control *aarch64_sys_regs_ic_hsh;
402 static struct hash_control *aarch64_sys_regs_dc_hsh;
403 static struct hash_control *aarch64_sys_regs_at_hsh;
404 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
405 static struct hash_control *aarch64_reg_hsh;
406 static struct hash_control *aarch64_barrier_opt_hsh;
407 static struct hash_control *aarch64_nzcv_hsh;
408 static struct hash_control *aarch64_pldop_hsh;
409 static struct hash_control *aarch64_hint_opt_hsh;
410 
411 /* Stuff needed to resolve the label ambiguity
412    As:
413      ...
414      label:   <insn>
415    may differ from:
416      ...
417      label:
418 	      <insn>  */
419 
420 static symbolS *last_label_seen;
421 
422 /* Literal pool structure.  Held on a per-section
423    and per-sub-section basis.  */
424 
425 #define MAX_LITERAL_POOL_SIZE 1024
426 typedef struct literal_expression
427 {
428   expressionS exp;
429   /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value.  */
430   LITTLENUM_TYPE * bignum;
431 } literal_expression;
432 
433 typedef struct literal_pool
434 {
435   literal_expression literals[MAX_LITERAL_POOL_SIZE];
436   unsigned int next_free_entry;
437   unsigned int id;
438   symbolS *symbol;
439   segT section;
440   subsegT sub_section;
441   int size;
442   struct literal_pool *next;
443 } literal_pool;
444 
445 /* Pointer to a linked list of literal pools.  */
446 static literal_pool *list_of_pools = NULL;
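
/* Background note (added, illustrative): these pools back constructs such as
   the "ldr <reg>, =<value>" pseudo-instruction; the value is deferred to a
   pool entry and emitted when a .ltorg directive (see s_ltorg below) or the
   assembler's end-of-assembly cleanup flushes the pool.  */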
447 
448 /* Pure syntax.	 */
449 
450 /* This array holds the chars that always start a comment.  If the
451    pre-processor is disabled, these aren't very useful.	 */
452 const char comment_chars[] = "";
453 
454 /* This array holds the chars that only start a comment at the beginning of
455    a line.  If the line seems to have the form '# 123 filename'
456    .line and .file directives will appear in the pre-processed output.	*/
457 /* Note that input_file.c hand checks for '#' at the beginning of the
458    first line of the input file.  This is because the compiler outputs
459    #NO_APP at the beginning of its output.  */
460 /* Also note that comments like this one will always work.  */
461 const char line_comment_chars[] = "#";
462 
463 const char line_separator_chars[] = ";";
464 
465 /* Chars that can be used to separate mant
466    from exp in floating point numbers.	*/
467 const char EXP_CHARS[] = "eE";
468 
469 /* Chars that mean this number is a floating point constant.  */
470 /* As in 0f12.456  */
471 /* or	 0d1.2345e12  */
472 
473 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
474 
475 /* Prefix character that indicates the start of an immediate value.  */
476 #define is_immediate_prefix(C) ((C) == '#')
477 
478 /* Separator character handling.  */
479 
480 #define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
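
/* Note (added): skipping a single space is sufficient here because the input
   scrubber collapses runs of whitespace to single spaces; see the comment in
   create_register_alias below.  */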
481 
482 static inline bfd_boolean
483 skip_past_char (char **str, char c)
484 {
485   if (**str == c)
486     {
487       (*str)++;
488       return TRUE;
489     }
490   else
491     return FALSE;
492 }
493 
494 #define skip_past_comma(str) skip_past_char (str, ',')
495 
496 /* Arithmetic expressions (possibly involving symbols).	 */
497 
498 static bfd_boolean in_my_get_expression_p = FALSE;
499 
500 /* Third argument to my_get_expression.	 */
501 #define GE_NO_PREFIX 0
502 #define GE_OPT_PREFIX 1
503 
504 /* Return TRUE if the string pointed to by *STR is successfully parsed
505    as a valid expression; *EP will be filled with the information of
506    such an expression.  Otherwise return FALSE.  */
507 
508 static bfd_boolean
509 my_get_expression (expressionS * ep, char **str, int prefix_mode,
510 		   int reject_absent)
511 {
512   char *save_in;
513   segT seg;
514   int prefix_present_p = 0;
515 
516   switch (prefix_mode)
517     {
518     case GE_NO_PREFIX:
519       break;
520     case GE_OPT_PREFIX:
521       if (is_immediate_prefix (**str))
522 	{
523 	  (*str)++;
524 	  prefix_present_p = 1;
525 	}
526       break;
527     default:
528       abort ();
529     }
530 
531   memset (ep, 0, sizeof (expressionS));
532 
533   save_in = input_line_pointer;
534   input_line_pointer = *str;
535   in_my_get_expression_p = TRUE;
536   seg = expression (ep);
537   in_my_get_expression_p = FALSE;
538 
539   if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
540     {
541       /* We found a bad expression in md_operand().  */
542       *str = input_line_pointer;
543       input_line_pointer = save_in;
544       if (prefix_present_p && ! error_p ())
545 	set_fatal_syntax_error (_("bad expression"));
546       else
547 	set_first_syntax_error (_("bad expression"));
548       return FALSE;
549     }
550 
551 #ifdef OBJ_AOUT
552   if (seg != absolute_section
553       && seg != text_section
554       && seg != data_section
555       && seg != bss_section && seg != undefined_section)
556     {
557       set_syntax_error (_("bad segment"));
558       *str = input_line_pointer;
559       input_line_pointer = save_in;
560       return FALSE;
561     }
562 #else
563   (void) seg;
564 #endif
565 
566   *str = input_line_pointer;
567   input_line_pointer = save_in;
568   return TRUE;
569 }
570 
571 /* Turn a string in input_line_pointer into a floating point constant
572    of type TYPE, and store the appropriate bytes in *LITP.  The number
573    of LITTLENUMS emitted is stored in *SIZEP.  An error message is
574    returned, or NULL on OK.  */
575 
576 const char *
577 md_atof (int type, char *litP, int *sizeP)
578 {
579   return ieee_md_atof (type, litP, sizeP, target_big_endian);
580 }
581 
582 /* We handle all bad expressions here, so that we can report the faulty
583    instruction in the error message.  */
584 void
585 md_operand (expressionS * exp)
586 {
587   if (in_my_get_expression_p)
588     exp->X_op = O_illegal;
589 }
590 
591 /* Immediate values.  */
592 
593 /* Errors may be set multiple times during parsing or bit encoding
594    (particularly in the Neon bits), but usually the earliest error which is set
595    will be the most meaningful. Avoid overwriting it with later (cascading)
596    errors by calling this function.  */
597 
598 static void
599 first_error (const char *error)
600 {
601   if (! error_p ())
602     set_syntax_error (error);
603 }
604 
605 /* Similar to first_error, but this function accepts a formatted error
606    message.  */
607 static void
608 first_error_fmt (const char *format, ...)
609 {
610   va_list args;
611   enum
612   { size = 100 };
613   /* N.B. this single buffer will not cause error messages for different
614      instructions to pollute each other; this is because at the end of
615      processing of each assembly line, the error message, if any, will be
616      collected by as_bad.  */
617   static char buffer[size];
618 
619   if (! error_p ())
620     {
621       int ret ATTRIBUTE_UNUSED;
622       va_start (args, format);
623       ret = vsnprintf (buffer, size, format, args);
624       know (ret <= size - 1 && ret >= 0);
625       va_end (args);
626       set_syntax_error (buffer);
627     }
628 }
629 
630 /* Register parsing.  */
631 
632 /* Generic register parser which is called by other specialized
633    register parsers.
634    CCP points to what should be the beginning of a register name.
635    If it is indeed a valid register name, advance CCP over it and
636    return the reg_entry structure; otherwise return NULL.
637    It does not issue diagnostics.  */
638 
639 static reg_entry *
640 parse_reg (char **ccp)
641 {
642   char *start = *ccp;
643   char *p;
644   reg_entry *reg;
645 
646 #ifdef REGISTER_PREFIX
647   if (*start != REGISTER_PREFIX)
648     return NULL;
649   start++;
650 #endif
651 
652   p = start;
653   if (!ISALPHA (*p) || !is_name_beginner (*p))
654     return NULL;
655 
656   do
657     p++;
658   while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
659 
660   reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
661 
662   if (!reg)
663     return NULL;
664 
665   *ccp = p;
666   return reg;
667 }
668 
669 /* Return TRUE if REG->TYPE is compatible with the required register type
670    TYPE; otherwise return FALSE.  */
671 static bfd_boolean
672 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
673 {
674   if (reg->type == type)
675     return TRUE;
676 
677   switch (type)
678     {
679     case REG_TYPE_R64_SP:	/* 64-bit integer reg (inc SP exc XZR).  */
680     case REG_TYPE_R_Z_SP:	/* Integer reg (inc {X}SP inc [WX]ZR).  */
681     case REG_TYPE_R_Z_BHSDQ_V:	/* Any register apart from Cn.  */
682     case REG_TYPE_BHSDQ:	/* Any [BHSDQ]P FP or SIMD scalar register.  */
683     case REG_TYPE_VN:		/* Vector register.  */
684       gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
685       return ((reg_type_masks[reg->type] & reg_type_masks[type])
686 	      == reg_type_masks[reg->type]);
687     default:
688       as_fatal ("unhandled type %d", type);
689       abort ();
690     }
691 }
692 
693 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
694    Return the register number otherwise.  *ISREG32 is set to one if the
695    register is 32-bit wide; *ISREGZERO is set to one if the register is
696    of type Z_32 or Z_64.
697    Note that this function does not issue any diagnostics.  */
698 
699 static int
700 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
701 			 int *isreg32, int *isregzero)
702 {
703   char *str = *ccp;
704   const reg_entry *reg = parse_reg (&str);
705 
706   if (reg == NULL)
707     return PARSE_FAIL;
708 
709   if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
710     return PARSE_FAIL;
711 
712   switch (reg->type)
713     {
714     case REG_TYPE_SP_32:
715     case REG_TYPE_SP_64:
716       if (reject_sp)
717 	return PARSE_FAIL;
718       *isreg32 = reg->type == REG_TYPE_SP_32;
719       *isregzero = 0;
720       break;
721     case REG_TYPE_R_32:
722     case REG_TYPE_R_64:
723       *isreg32 = reg->type == REG_TYPE_R_32;
724       *isregzero = 0;
725       break;
726     case REG_TYPE_Z_32:
727     case REG_TYPE_Z_64:
728       if (reject_rz)
729 	return PARSE_FAIL;
730       *isreg32 = reg->type == REG_TYPE_Z_32;
731       *isregzero = 1;
732       break;
733     default:
734       return PARSE_FAIL;
735     }
736 
737   *ccp = str;
738 
739   return reg->number;
740 }
741 
742 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
743    Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
744    otherwise return FALSE.
745 
746    Accept only one occurrence of:
747    8b 16b 2h 4h 8h 2s 4s 1d 2d
748    b h s d q  */
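
/* For example (illustrative, added): ".4s" yields type NT_s and width 4 (a
   128-bit arrangement), ".2h" yields NT_h and width 2 (accepted as a special
   case below), and a bare ".s" yields NT_s with width 0, which callers
   normally expect to be followed by an index (see parse_typed_reg).  */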
749 static bfd_boolean
750 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
751 {
752   char *ptr = *str;
753   unsigned width;
754   unsigned element_size;
755   enum neon_el_type type;
756 
757   /* skip '.' */
758   ptr++;
759 
760   if (!ISDIGIT (*ptr))
761     {
762       width = 0;
763       goto elt_size;
764     }
765   width = strtoul (ptr, &ptr, 10);
766   if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
767     {
768       first_error_fmt (_("bad size %d in vector width specifier"), width);
769       return FALSE;
770     }
771 
772 elt_size:
773   switch (TOLOWER (*ptr))
774     {
775     case 'b':
776       type = NT_b;
777       element_size = 8;
778       break;
779     case 'h':
780       type = NT_h;
781       element_size = 16;
782       break;
783     case 's':
784       type = NT_s;
785       element_size = 32;
786       break;
787     case 'd':
788       type = NT_d;
789       element_size = 64;
790       break;
791     case 'q':
792       if (width == 1)
793 	{
794 	  type = NT_q;
795 	  element_size = 128;
796 	  break;
797 	}
798       /* fall through.  */
799     default:
800       if (*ptr != '\0')
801 	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
802       else
803 	first_error (_("missing element size"));
804       return FALSE;
805     }
806   if (width != 0 && width * element_size != 64 && width * element_size != 128
807       && !(width == 2 && element_size == 16))
808     {
809       first_error_fmt (_
810 		       ("invalid element size %d and vector size combination %c"),
811 		       width, *ptr);
812       return FALSE;
813     }
814   ptr++;
815 
816   parsed_type->type = type;
817   parsed_type->width = width;
818 
819   *str = ptr;
820 
821   return TRUE;
822 }
823 
824 /* Parse a single type, e.g. ".8b", leading period included.
825    Only applicable to Vn registers.
826 
827    Return TRUE on success; otherwise return FALSE.  */
828 static bfd_boolean
829 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
830 {
831   char *str = *ccp;
832 
833   if (*str == '.')
834     {
835       if (! parse_neon_type_for_operand (vectype, &str))
836 	{
837 	  first_error (_("vector type expected"));
838 	  return FALSE;
839 	}
840     }
841   else
842     return FALSE;
843 
844   *ccp = str;
845 
846   return TRUE;
847 }
848 
849 /* Parse a register of the type TYPE.
850 
851    Return PARSE_FAIL if the string pointed by *CCP is not a valid register
852    name or the parsed register is not of TYPE.
853 
854    Otherwise return the register number, and optionally fill in the actual
855    type of the register in *RTYPE when multiple alternatives were given, and
856    return the register shape and element index information in *TYPEINFO.
857 
858    IN_REG_LIST should be set to TRUE if the caller is parsing a register
859    list.  */
860 
861 static int
862 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
863 		 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
864 {
865   char *str = *ccp;
866   const reg_entry *reg = parse_reg (&str);
867   struct neon_type_el atype;
868   struct neon_type_el parsetype;
869   bfd_boolean is_typed_vecreg = FALSE;
870 
871   atype.defined = 0;
872   atype.type = NT_invtype;
873   atype.width = -1;
874   atype.index = 0;
875 
876   if (reg == NULL)
877     {
878       if (typeinfo)
879 	*typeinfo = atype;
880       set_default_error ();
881       return PARSE_FAIL;
882     }
883 
884   if (! aarch64_check_reg_type (reg, type))
885     {
886       DEBUG_TRACE ("reg type check failed");
887       set_default_error ();
888       return PARSE_FAIL;
889     }
890   type = reg->type;
891 
892   if (type == REG_TYPE_VN
893       && parse_neon_operand_type (&parsetype, &str))
894     {
895       /* Register is of the form Vn.[bhsdq].  */
896       is_typed_vecreg = TRUE;
897 
898       if (parsetype.width == 0)
899 	/* Expect index. In the new scheme we cannot have
900 	   Vn.[bhsdq] represent a scalar. Therefore any
901 	   Vn.[bhsdq] should have an index following it.
902 	   Except in register lists, of course.  */
903 	atype.defined |= NTA_HASINDEX;
904       else
905 	atype.defined |= NTA_HASTYPE;
906 
907       atype.type = parsetype.type;
908       atype.width = parsetype.width;
909     }
910 
911   if (skip_past_char (&str, '['))
912     {
913       expressionS exp;
914 
915       /* Reject Sn[index] syntax.  */
916       if (!is_typed_vecreg)
917 	{
918 	  first_error (_("this type of register can't be indexed"));
919 	  return PARSE_FAIL;
920 	}
921 
922       if (in_reg_list == TRUE)
923 	{
924 	  first_error (_("index not allowed inside register list"));
925 	  return PARSE_FAIL;
926 	}
927 
928       atype.defined |= NTA_HASINDEX;
929 
930       my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
931 
932       if (exp.X_op != O_constant)
933 	{
934 	  first_error (_("constant expression required"));
935 	  return PARSE_FAIL;
936 	}
937 
938       if (! skip_past_char (&str, ']'))
939 	return PARSE_FAIL;
940 
941       atype.index = exp.X_add_number;
942     }
943   else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
944     {
945       /* Indexed vector register expected.  */
946       first_error (_("indexed vector register expected"));
947       return PARSE_FAIL;
948     }
949 
950   /* A vector reg Vn should be typed or indexed.  */
951   if (type == REG_TYPE_VN && atype.defined == 0)
952     {
953       first_error (_("invalid use of vector register"));
954     }
955 
956   if (typeinfo)
957     *typeinfo = atype;
958 
959   if (rtype)
960     *rtype = type;
961 
962   *ccp = str;
963 
964   return reg->number;
965 }
966 
967 /* Parse register.
968 
969    Return the register number on success; return PARSE_FAIL otherwise.
970 
971    If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
972    the register (e.g. NEON double or quad reg when either has been requested).
973 
974    If this is a NEON vector register with additional type information, fill
975    in the struct pointed to by VECTYPE (if non-NULL).
976 
977    This parser does not handle register lists.  */
978 
979 static int
980 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
981 		   aarch64_reg_type *rtype, struct neon_type_el *vectype)
982 {
983   struct neon_type_el atype;
984   char *str = *ccp;
985   int reg = parse_typed_reg (&str, type, rtype, &atype,
986 			     /*in_reg_list= */ FALSE);
987 
988   if (reg == PARSE_FAIL)
989     return PARSE_FAIL;
990 
991   if (vectype)
992     *vectype = atype;
993 
994   *ccp = str;
995 
996   return reg;
997 }
998 
999 static inline bfd_boolean
1000 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
1001 {
1002   return
1003     e1.type == e2.type
1004     && e1.defined == e2.defined
1005     && e1.width == e2.width && e1.index == e2.index;
1006 }
1007 
1008 /* This function parses the NEON register list.  On success, it returns
1009    the parsed register list information in the following encoded format:
1010 
1011    bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
1012        4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1013 
1014    The information of the register shape and/or index is returned in
1015    *VECTYPE.
1016 
1017    It returns PARSE_FAIL if the register list is invalid.
1018 
1019    The list contains one to four registers.
1020    Each register can be one of:
1021    <Vt>.<T>[<index>]
1022    <Vt>.<T>
1023    All <T> should be identical.
1024    All <index> should be identical.
1025    There are restrictions on <Vt> numbers which are checked later
1026    (by reg_list_valid_p).  */
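
/* Worked example of the encoding above (added for clarity): parsing
   "{v2.4s, v3.4s}" accumulates ret_val = 2 | (3 << 5) with nb_regs = 2, so
   the function returns (ret_val << 2) | (nb_regs - 1) = 0x189: the register
   count is encoded as nb_regs - 1 = 1 in bits 0-1, the first register number
   2 in bits 2-6 and the second register number 3 in bits 7-11.  */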
1027 
1028 static int
1029 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1030 {
1031   char *str = *ccp;
1032   int nb_regs;
1033   struct neon_type_el typeinfo, typeinfo_first;
1034   int val, val_range;
1035   int in_range;
1036   int ret_val;
1037   int i;
1038   bfd_boolean error = FALSE;
1039   bfd_boolean expect_index = FALSE;
1040 
1041   if (*str != '{')
1042     {
1043       set_syntax_error (_("expecting {"));
1044       return PARSE_FAIL;
1045     }
1046   str++;
1047 
1048   nb_regs = 0;
1049   typeinfo_first.defined = 0;
1050   typeinfo_first.type = NT_invtype;
1051   typeinfo_first.width = -1;
1052   typeinfo_first.index = 0;
1053   ret_val = 0;
1054   val = -1;
1055   val_range = -1;
1056   in_range = 0;
1057   do
1058     {
1059       if (in_range)
1060 	{
1061 	  str++;		/* skip over '-' */
1062 	  val_range = val;
1063 	}
1064       val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1065 			     /*in_reg_list= */ TRUE);
1066       if (val == PARSE_FAIL)
1067 	{
1068 	  set_first_syntax_error (_("invalid vector register in list"));
1069 	  error = TRUE;
1070 	  continue;
1071 	}
1072       /* reject [bhsd]n */
1073       if (typeinfo.defined == 0)
1074 	{
1075 	  set_first_syntax_error (_("invalid scalar register in list"));
1076 	  error = TRUE;
1077 	  continue;
1078 	}
1079 
1080       if (typeinfo.defined & NTA_HASINDEX)
1081 	expect_index = TRUE;
1082 
1083       if (in_range)
1084 	{
1085 	  if (val < val_range)
1086 	    {
1087 	      set_first_syntax_error
1088 		(_("invalid range in vector register list"));
1089 	      error = TRUE;
1090 	    }
1091 	  val_range++;
1092 	}
1093       else
1094 	{
1095 	  val_range = val;
1096 	  if (nb_regs == 0)
1097 	    typeinfo_first = typeinfo;
1098 	  else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1099 	    {
1100 	      set_first_syntax_error
1101 		(_("type mismatch in vector register list"));
1102 	      error = TRUE;
1103 	    }
1104 	}
1105       if (! error)
1106 	for (i = val_range; i <= val; i++)
1107 	  {
1108 	    ret_val |= i << (5 * nb_regs);
1109 	    nb_regs++;
1110 	  }
1111       in_range = 0;
1112     }
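  /* Added explanatory comment: if no comma follows, the comma operator below
     sets in_range and the loop only continues when the next character is '-',
     i.e. when the "Vm - Vn" range syntax is used; the '-' itself is skipped at
     the top of the loop.  */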
1113   while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1114 
1115   skip_whitespace (str);
1116   if (*str != '}')
1117     {
1118       set_first_syntax_error (_("end of vector register list not found"));
1119       error = TRUE;
1120     }
1121   str++;
1122 
1123   skip_whitespace (str);
1124 
1125   if (expect_index)
1126     {
1127       if (skip_past_char (&str, '['))
1128 	{
1129 	  expressionS exp;
1130 
1131 	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1132 	  if (exp.X_op != O_constant)
1133 	    {
1134 	      set_first_syntax_error (_("constant expression required."));
1135 	      error = TRUE;
1136 	    }
1137 	  if (! skip_past_char (&str, ']'))
1138 	    error = TRUE;
1139 	  else
1140 	    typeinfo_first.index = exp.X_add_number;
1141 	}
1142       else
1143 	{
1144 	  set_first_syntax_error (_("expected index"));
1145 	  error = TRUE;
1146 	}
1147     }
1148 
1149   if (nb_regs > 4)
1150     {
1151       set_first_syntax_error (_("too many registers in vector register list"));
1152       error = TRUE;
1153     }
1154   else if (nb_regs == 0)
1155     {
1156       set_first_syntax_error (_("empty vector register list"));
1157       error = TRUE;
1158     }
1159 
1160   *ccp = str;
1161   if (! error)
1162     *vectype = typeinfo_first;
1163 
1164   return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1165 }
1166 
1167 /* Directives: register aliases.  */
1168 
1169 static reg_entry *
1170 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1171 {
1172   reg_entry *new;
1173   const char *name;
1174 
1175   if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1176     {
1177       if (new->builtin)
1178 	as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1179 		 str);
1180 
1181       /* Only warn about a redefinition if it's not defined as the
1182          same register.  */
1183       else if (new->number != number || new->type != type)
1184 	as_warn (_("ignoring redefinition of register alias '%s'"), str);
1185 
1186       return NULL;
1187     }
1188 
1189   name = xstrdup (str);
1190   new = XNEW (reg_entry);
1191 
1192   new->name = name;
1193   new->number = number;
1194   new->type = type;
1195   new->builtin = FALSE;
1196 
1197   if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1198     abort ();
1199 
1200   return new;
1201 }
1202 
1203 /* Look for the .req directive.	 This is of the form:
1204 
1205 	new_register_name .req existing_register_name
1206 
1207    If we find one, or if it looks sufficiently like one that we want to
1208    handle any error here, return TRUE.  Otherwise return FALSE.  */
1209 
1210 static bfd_boolean
1211 create_register_alias (char *newname, char *p)
1212 {
1213   const reg_entry *old;
1214   char *oldname, *nbuf;
1215   size_t nlen;
1216 
1217   /* The input scrubber ensures that whitespace after the mnemonic is
1218      collapsed to single spaces.  */
1219   oldname = p;
1220   if (strncmp (oldname, " .req ", 6) != 0)
1221     return FALSE;
1222 
1223   oldname += 6;
1224   if (*oldname == '\0')
1225     return FALSE;
1226 
1227   old = hash_find (aarch64_reg_hsh, oldname);
1228   if (!old)
1229     {
1230       as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1231       return TRUE;
1232     }
1233 
1234   /* If TC_CASE_SENSITIVE is defined, then newname already points to
1235      the desired alias name, and p points to its end.  If not, then
1236      the desired alias name is in the global original_case_string.  */
1237 #ifdef TC_CASE_SENSITIVE
1238   nlen = p - newname;
1239 #else
1240   newname = original_case_string;
1241   nlen = strlen (newname);
1242 #endif
1243 
1244   nbuf = xmemdup0 (newname, nlen);
1245 
1246   /* Create aliases under the new name as stated; an all-lowercase
1247      version of the new name; and an all-uppercase version of the new
1248      name.  */
1249   if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1250     {
1251       for (p = nbuf; *p; p++)
1252 	*p = TOUPPER (*p);
1253 
1254       if (strncmp (nbuf, newname, nlen))
1255 	{
1256 	  /* If this attempt to create an additional alias fails, do not bother
1257 	     trying to create the all-lower case alias.  We will fail and issue
1258 	     a second, duplicate error message.  This situation arises when the
1259 	     programmer does something like:
1260 	     foo .req r0
1261 	     Foo .req r1
1262 	     The second .req creates the "Foo" alias but then fails to create
1263 	     the artificial FOO alias because it has already been created by the
1264 	     first .req.  */
1265 	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1266 	    {
1267 	      free (nbuf);
1268 	      return TRUE;
1269 	    }
1270 	}
1271 
1272       for (p = nbuf; *p; p++)
1273 	*p = TOLOWER (*p);
1274 
1275       if (strncmp (nbuf, newname, nlen))
1276 	insert_reg_alias (nbuf, old->number, old->type);
1277     }
1278 
1279   free (nbuf);
1280   return TRUE;
1281 }
1282 
1283 /* Should never be called, as .req goes between the alias and the
1284    register name, not at the beginning of the line.  */
1285 static void
1286 s_req (int a ATTRIBUTE_UNUSED)
1287 {
1288   as_bad (_("invalid syntax for .req directive"));
1289 }
1290 
1291 /* The .unreq directive deletes an alias which was previously defined
1292    by .req.  For example:
1293 
1294        my_alias .req r11
1295        .unreq my_alias	  */
1296 
1297 static void
1298 s_unreq (int a ATTRIBUTE_UNUSED)
1299 {
1300   char *name;
1301   char saved_char;
1302 
1303   name = input_line_pointer;
1304 
1305   while (*input_line_pointer != 0
1306 	 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1307     ++input_line_pointer;
1308 
1309   saved_char = *input_line_pointer;
1310   *input_line_pointer = 0;
1311 
1312   if (!*name)
1313     as_bad (_("invalid syntax for .unreq directive"));
1314   else
1315     {
1316       reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1317 
1318       if (!reg)
1319 	as_bad (_("unknown register alias '%s'"), name);
1320       else if (reg->builtin)
1321 	as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1322 		 name);
1323       else
1324 	{
1325 	  char *p;
1326 	  char *nbuf;
1327 
1328 	  hash_delete (aarch64_reg_hsh, name, FALSE);
1329 	  free ((char *) reg->name);
1330 	  free (reg);
1331 
1332 	  /* Also locate the all upper case and all lower case versions.
1333 	     Do not complain if we cannot find one or the other as it
1334 	     was probably deleted above.  */
1335 
1336 	  nbuf = strdup (name);
1337 	  for (p = nbuf; *p; p++)
1338 	    *p = TOUPPER (*p);
1339 	  reg = hash_find (aarch64_reg_hsh, nbuf);
1340 	  if (reg)
1341 	    {
1342 	      hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1343 	      free ((char *) reg->name);
1344 	      free (reg);
1345 	    }
1346 
1347 	  for (p = nbuf; *p; p++)
1348 	    *p = TOLOWER (*p);
1349 	  reg = hash_find (aarch64_reg_hsh, nbuf);
1350 	  if (reg)
1351 	    {
1352 	      hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1353 	      free ((char *) reg->name);
1354 	      free (reg);
1355 	    }
1356 
1357 	  free (nbuf);
1358 	}
1359     }
1360 
1361   *input_line_pointer = saved_char;
1362   demand_empty_rest_of_line ();
1363 }
1364 
1365 /* Directives: Instruction set selection.  */
1366 
1367 #ifdef OBJ_ELF
1368 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1369    spec.  (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1370    Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1371    and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped.  */
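
/* Added note (illustrative): a "$x" mapping symbol marks the start of a run
   of A64 instructions and a "$d" symbol marks the start of a run of data, so
   that consumers such as disassemblers can distinguish code from literal data
   within the same section.  */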
1372 
1373 /* Create a new mapping symbol for the transition to STATE.  */
1374 
1375 static void
1376 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1377 {
1378   symbolS *symbolP;
1379   const char *symname;
1380   int type;
1381 
1382   switch (state)
1383     {
1384     case MAP_DATA:
1385       symname = "$d";
1386       type = BSF_NO_FLAGS;
1387       break;
1388     case MAP_INSN:
1389       symname = "$x";
1390       type = BSF_NO_FLAGS;
1391       break;
1392     default:
1393       abort ();
1394     }
1395 
1396   symbolP = symbol_new (symname, now_seg, value, frag);
1397   symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1398 
1399   /* Save the mapping symbols for future reference.  Also check that
1400      we do not place two mapping symbols at the same offset within a
1401      frag.  We'll handle overlap between frags in
1402      check_mapping_symbols.
1403 
1404      If .fill or other data filling directive generates zero sized data,
1405      the mapping symbol for the following code will have the same value
1406      as the one generated for the data filling directive.  In this case,
1407      we replace the old symbol with the new one at the same address.  */
1408   if (value == 0)
1409     {
1410       if (frag->tc_frag_data.first_map != NULL)
1411 	{
1412 	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1413 	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1414 			 &symbol_lastP);
1415 	}
1416       frag->tc_frag_data.first_map = symbolP;
1417     }
1418   if (frag->tc_frag_data.last_map != NULL)
1419     {
1420       know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1421 	    S_GET_VALUE (symbolP));
1422       if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1423 	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1424 		       &symbol_lastP);
1425     }
1426   frag->tc_frag_data.last_map = symbolP;
1427 }
1428 
1429 /* We must sometimes convert a region marked as code to data during
1430    code alignment, if an odd number of bytes have to be padded.  The
1431    code mapping symbol is pushed to an aligned address.  */
1432 
1433 static void
1434 insert_data_mapping_symbol (enum mstate state,
1435 			    valueT value, fragS * frag, offsetT bytes)
1436 {
1437   /* If there was already a mapping symbol, remove it.  */
1438   if (frag->tc_frag_data.last_map != NULL
1439       && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1440       frag->fr_address + value)
1441     {
1442       symbolS *symp = frag->tc_frag_data.last_map;
1443 
1444       if (value == 0)
1445 	{
1446 	  know (frag->tc_frag_data.first_map == symp);
1447 	  frag->tc_frag_data.first_map = NULL;
1448 	}
1449       frag->tc_frag_data.last_map = NULL;
1450       symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1451     }
1452 
1453   make_mapping_symbol (MAP_DATA, value, frag);
1454   make_mapping_symbol (state, value + bytes, frag);
1455 }
1456 
1457 static void mapping_state_2 (enum mstate state, int max_chars);
1458 
1459 /* Set the mapping state to STATE.  Only call this when about to
1460    emit some STATE bytes to the file.  */
1461 
1462 void
1463 mapping_state (enum mstate state)
1464 {
1465   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1466 
1467   if (state == MAP_INSN)
1468     /* AArch64 instructions require 4-byte alignment.  When emitting
1469        instructions into any section, record the appropriate section
1470        alignment.  */
1471     record_alignment (now_seg, 2);
1472 
1473   if (mapstate == state)
1474     /* The mapping symbol has already been emitted.
1475        There is nothing else to do.  */
1476     return;
1477 
1478 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1479   if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1480     /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
1481        evaluated later in the next else.  */
1482     return;
1483   else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1484     {
1485       /* Only add the symbol if the offset is > 0:
1486 	 if we're at the first frag, check its size > 0;
1487 	 if we're not at the first frag, then for sure
1488 	 the offset is > 0.  */
1489       struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1490       const int add_symbol = (frag_now != frag_first)
1491 	|| (frag_now_fix () > 0);
1492 
1493       if (add_symbol)
1494 	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1495     }
1496 #undef TRANSITION
1497 
1498   mapping_state_2 (state, 0);
1499 }
1500 
1501 /* Same as mapping_state, but MAX_CHARS bytes have already been
1502    allocated.  Put the mapping symbol that far back.  */
1503 
1504 static void
1505 mapping_state_2 (enum mstate state, int max_chars)
1506 {
1507   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1508 
1509   if (!SEG_NORMAL (now_seg))
1510     return;
1511 
1512   if (mapstate == state)
1513     /* The mapping symbol has already been emitted.
1514        There is nothing else to do.  */
1515     return;
1516 
1517   seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1518   make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1519 }
1520 #else
1521 #define mapping_state(x)	/* nothing */
1522 #define mapping_state_2(x, y)	/* nothing */
1523 #endif
1524 
1525 /* Directives: sectioning and alignment.  */
1526 
1527 static void
1528 s_bss (int ignore ATTRIBUTE_UNUSED)
1529 {
1530   /* We don't support putting frags in the BSS segment, we fake it by
1531      marking in_bss, then looking at s_skip for clues.  */
1532   subseg_set (bss_section, 0);
1533   demand_empty_rest_of_line ();
1534   mapping_state (MAP_DATA);
1535 }
1536 
1537 static void
1538 s_even (int ignore ATTRIBUTE_UNUSED)
1539 {
1540   /* Never make frag if expect extra pass.  */
1541   if (!need_pass_2)
1542     frag_align (1, 0, 0);
1543 
1544   record_alignment (now_seg, 1);
1545 
1546   demand_empty_rest_of_line ();
1547 }
1548 
1549 /* Directives: Literal pools.  */
1550 
1551 static literal_pool *
1552 find_literal_pool (int size)
1553 {
1554   literal_pool *pool;
1555 
1556   for (pool = list_of_pools; pool != NULL; pool = pool->next)
1557     {
1558       if (pool->section == now_seg
1559 	  && pool->sub_section == now_subseg && pool->size == size)
1560 	break;
1561     }
1562 
1563   return pool;
1564 }
1565 
1566 static literal_pool *
1567 find_or_make_literal_pool (int size)
1568 {
1569   /* Next literal pool ID number.  */
1570   static unsigned int latest_pool_num = 1;
1571   literal_pool *pool;
1572 
1573   pool = find_literal_pool (size);
1574 
1575   if (pool == NULL)
1576     {
1577       /* Create a new pool.  */
1578       pool = XNEW (literal_pool);
1579       if (!pool)
1580 	return NULL;
1581 
1582       /* Currently we always put the literal pool in the current text
1583          section.  If we were generating "small" model code where we
1584          knew that all code and initialised data was within 1MB then
1585          we could output literals to mergeable, read-only data
1586          sections. */
1587 
1588       pool->next_free_entry = 0;
1589       pool->section = now_seg;
1590       pool->sub_section = now_subseg;
1591       pool->size = size;
1592       pool->next = list_of_pools;
1593       pool->symbol = NULL;
1594 
1595       /* Add it to the list.  */
1596       list_of_pools = pool;
1597     }
1598 
1599   /* New pools, and emptied pools, will have a NULL symbol.  */
1600   if (pool->symbol == NULL)
1601     {
1602       pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1603 				    (valueT) 0, &zero_address_frag);
1604       pool->id = latest_pool_num++;
1605     }
1606 
1607   /* Done.  */
1608   return pool;
1609 }
1610 
1611 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1612    Return TRUE on success, otherwise return FALSE.  */
1613 static bfd_boolean
1614 add_to_lit_pool (expressionS *exp, int size)
1615 {
1616   literal_pool *pool;
1617   unsigned int entry;
1618 
1619   pool = find_or_make_literal_pool (size);
1620 
1621   /* Check if this literal value is already in the pool.  */
1622   for (entry = 0; entry < pool->next_free_entry; entry++)
1623     {
1624       expressionS * litexp = & pool->literals[entry].exp;
1625 
1626       if ((litexp->X_op == exp->X_op)
1627 	  && (exp->X_op == O_constant)
1628 	  && (litexp->X_add_number == exp->X_add_number)
1629 	  && (litexp->X_unsigned == exp->X_unsigned))
1630 	break;
1631 
1632       if ((litexp->X_op == exp->X_op)
1633 	  && (exp->X_op == O_symbol)
1634 	  && (litexp->X_add_number == exp->X_add_number)
1635 	  && (litexp->X_add_symbol == exp->X_add_symbol)
1636 	  && (litexp->X_op_symbol == exp->X_op_symbol))
1637 	break;
1638     }
1639 
1640   /* Do we need to create a new entry?  */
1641   if (entry == pool->next_free_entry)
1642     {
1643       if (entry >= MAX_LITERAL_POOL_SIZE)
1644 	{
1645 	  set_syntax_error (_("literal pool overflow"));
1646 	  return FALSE;
1647 	}
1648 
1649       pool->literals[entry].exp = *exp;
1650       pool->next_free_entry += 1;
1651       if (exp->X_op == O_big)
1652 	{
1653 	  /* PR 16688: Bignums are held in a single global array.  We must
1654 	     copy and preserve that value now, before it is overwritten.  */
1655 	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
1656 						  exp->X_add_number);
1657 	  memcpy (pool->literals[entry].bignum, generic_bignum,
1658 		  CHARS_PER_LITTLENUM * exp->X_add_number);
1659 	}
1660       else
1661 	pool->literals[entry].bignum = NULL;
1662     }
1663 
1664   exp->X_op = O_symbol;
1665   exp->X_add_number = ((int) entry) * size;
1666   exp->X_add_symbol = pool->symbol;
1667 
1668   return TRUE;
1669 }
1670 
1671 /* Can't use symbol_new here, so have to create a symbol and then at
1672    a later date assign it a value. That's what these functions do.  */
1673 
1674 static void
1675 symbol_locate (symbolS * symbolP,
1676 	       const char *name,/* It is copied, the caller can modify.  */
1677 	       segT segment,	/* Segment identifier (SEG_<something>).  */
1678 	       valueT valu,	/* Symbol value.  */
1679 	       fragS * frag)	/* Associated fragment.  */
1680 {
1681   size_t name_length;
1682   char *preserved_copy_of_name;
1683 
1684   name_length = strlen (name) + 1;	/* +1 for \0.  */
1685   obstack_grow (&notes, name, name_length);
1686   preserved_copy_of_name = obstack_finish (&notes);
1687 
1688 #ifdef tc_canonicalize_symbol_name
1689   preserved_copy_of_name =
1690     tc_canonicalize_symbol_name (preserved_copy_of_name);
1691 #endif
1692 
1693   S_SET_NAME (symbolP, preserved_copy_of_name);
1694 
1695   S_SET_SEGMENT (symbolP, segment);
1696   S_SET_VALUE (symbolP, valu);
1697   symbol_clear_list_pointers (symbolP);
1698 
1699   symbol_set_frag (symbolP, frag);
1700 
1701   /* Link to end of symbol chain.  */
1702   {
1703     extern int symbol_table_frozen;
1704 
1705     if (symbol_table_frozen)
1706       abort ();
1707   }
1708 
1709   symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1710 
1711   obj_symbol_new_hook (symbolP);
1712 
1713 #ifdef tc_symbol_new_hook
1714   tc_symbol_new_hook (symbolP);
1715 #endif
1716 
1717 #ifdef DEBUG_SYMS
1718   verify_symbol_chain (symbol_rootP, symbol_lastP);
1719 #endif /* DEBUG_SYMS  */
1720 }
1721 
1722 
1723 static void
1724 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1725 {
1726   unsigned int entry;
1727   literal_pool *pool;
1728   char sym_name[20];
1729   int align;
1730 
1731   for (align = 2; align <= 4; align++)
1732     {
1733       int size = 1 << align;
1734 
1735       pool = find_literal_pool (size);
1736       if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1737 	continue;
1738 
1739       /* Align pool as you have word accesses.
1740          Only make a frag if we have to.  */
1741       if (!need_pass_2)
1742 	frag_align (align, 0, 0);
1743 
1744       mapping_state (MAP_DATA);
1745 
1746       record_alignment (now_seg, align);
1747 
1748       sprintf (sym_name, "$$lit_\002%x", pool->id);
1749 
1750       symbol_locate (pool->symbol, sym_name, now_seg,
1751 		     (valueT) frag_now_fix (), frag_now);
1752       symbol_table_insert (pool->symbol);
1753 
1754       for (entry = 0; entry < pool->next_free_entry; entry++)
1755 	{
1756 	  expressionS * exp = & pool->literals[entry].exp;
1757 
1758 	  if (exp->X_op == O_big)
1759 	    {
1760 	      /* PR 16688: Restore the global bignum value.  */
1761 	      gas_assert (pool->literals[entry].bignum != NULL);
1762 	      memcpy (generic_bignum, pool->literals[entry].bignum,
1763 		      CHARS_PER_LITTLENUM * exp->X_add_number);
1764 	    }
1765 
1766 	  /* First output the expression in the instruction to the pool.  */
1767 	  emit_expr (exp, size);	/* .word|.xword  */
1768 
1769 	  if (exp->X_op == O_big)
1770 	    {
1771 	      free (pool->literals[entry].bignum);
1772 	      pool->literals[entry].bignum = NULL;
1773 	    }
1774 	}
1775 
1776       /* Mark the pool as empty.  */
1777       pool->next_free_entry = 0;
1778       pool->symbol = NULL;
1779     }
1780 }
1781 
1782 #ifdef OBJ_ELF
1783 /* Forward declarations for functions below, in the MD interface
1784    section.  */
1785 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1786 static struct reloc_table_entry * find_reloc_table_entry (char **);
1787 
1788 /* Directives: Data.  */
1789 /* N.B. the support for relocation suffix in this directive needs to be
1790    implemented properly.  */
1791 
1792 static void
1793 s_aarch64_elf_cons (int nbytes)
1794 {
1795   expressionS exp;
1796 
1797 #ifdef md_flush_pending_output
1798   md_flush_pending_output ();
1799 #endif
1800 
1801   if (is_it_end_of_statement ())
1802     {
1803       demand_empty_rest_of_line ();
1804       return;
1805     }
1806 
1807 #ifdef md_cons_align
1808   md_cons_align (nbytes);
1809 #endif
1810 
1811   mapping_state (MAP_DATA);
1812   do
1813     {
1814       struct reloc_table_entry *reloc;
1815 
1816       expression (&exp);
1817 
1818       if (exp.X_op != O_symbol)
1819 	emit_expr (&exp, (unsigned int) nbytes);
1820       else
1821 	{
1822 	  skip_past_char (&input_line_pointer, '#');
1823 	  if (skip_past_char (&input_line_pointer, ':'))
1824 	    {
1825 	      reloc = find_reloc_table_entry (&input_line_pointer);
1826 	      if (reloc == NULL)
1827 		as_bad (_("unrecognized relocation suffix"));
1828 	      else
1829 		as_bad (_("unimplemented relocation suffix"));
1830 	      ignore_rest_of_line ();
1831 	      return;
1832 	    }
1833 	  else
1834 	    emit_expr (&exp, (unsigned int) nbytes);
1835 	}
1836     }
1837   while (*input_line_pointer++ == ',');
1838 
1839   /* Put terminator back into stream.  */
1840   input_line_pointer--;
1841   demand_empty_rest_of_line ();
1842 }
1843 
1844 #endif /* OBJ_ELF */
1845 
1846 /* Output a 32-bit word, but mark as an instruction.  */
1847 
1848 static void
1849 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1850 {
1851   expressionS exp;
1852 
1853 #ifdef md_flush_pending_output
1854   md_flush_pending_output ();
1855 #endif
1856 
1857   if (is_it_end_of_statement ())
1858     {
1859       demand_empty_rest_of_line ();
1860       return;
1861     }
1862 
1863   /* Sections are assumed to start aligned.  In an executable section, there
1864      is no MAP_DATA symbol pending, so we only align the address during the
1865      MAP_DATA --> MAP_INSN transition.
1866      For other sections, this is not guaranteed.  */
1867   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1868   if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
1869     frag_align_code (2, 0);
1870 
1871 #ifdef OBJ_ELF
1872   mapping_state (MAP_INSN);
1873 #endif
1874 
1875   do
1876     {
1877       expression (&exp);
1878       if (exp.X_op != O_constant)
1879 	{
1880 	  as_bad (_("constant expression required"));
1881 	  ignore_rest_of_line ();
1882 	  return;
1883 	}
1884 
1885       if (target_big_endian)
1886 	{
1887 	  unsigned int val = exp.X_add_number;
1888 	  exp.X_add_number = SWAP_32 (val);
1889 	}
1890       emit_expr (&exp, 4);
1891     }
1892   while (*input_line_pointer++ == ',');
1893 
1894   /* Put terminator back into stream.  */
1895   input_line_pointer--;
1896   demand_empty_rest_of_line ();
1897 }
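
/* For example (a sketch, not taken from the original source):

	.inst	0xd503201f		// emit a NOP encoding as code

   emits the 32-bit value with an instruction (MAP_INSN) mapping rather
   than a data mapping; on a big-endian target the value is byte-swapped
   above so that the emitted bytes still form the instruction in the
   expected byte order.  */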
1898 
1899 #ifdef OBJ_ELF
1900 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction.  */
1901 
1902 static void
1903 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
1904 {
1905   expressionS exp;
1906 
1907   expression (&exp);
1908   frag_grow (4);
1909   fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1910 		   BFD_RELOC_AARCH64_TLSDESC_ADD);
1911 
1912   demand_empty_rest_of_line ();
1913 }
1914 
1915 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.  */
1916 
1917 static void
1918 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1919 {
1920   expressionS exp;
1921 
1922   /* Since we're just labelling the code, there's no need to define a
1923      mapping symbol.  */
1924   expression (&exp);
1925   /* Make sure there is enough room in this frag for the following
1926      blr.  This trick only works if the blr follows immediately after
1927      the .tlsdesc directive.  */
1928   frag_grow (4);
1929   fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1930 		   BFD_RELOC_AARCH64_TLSDESC_CALL);
1931 
1932   demand_empty_rest_of_line ();
1933 }
1934 
1935 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction.  */
1936 
1937 static void
1938 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
1939 {
1940   expressionS exp;
1941 
1942   expression (&exp);
1943   frag_grow (4);
1944   fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1945 		   BFD_RELOC_AARCH64_TLSDESC_LDR);
1946 
1947   demand_empty_rest_of_line ();
1948 }
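
/* These directives tag the instructions of a TLS descriptor sequence.
   A sketch of the conventional sequence for a global-dynamic variable
   "var" (assuming the usual compiler/assembler code generation):

	adrp	x0, :tlsdesc:var
	ldr	x1, [x0, #:tlsdesc_lo12:var]
	add	x0, x0, #:tlsdesc_lo12:var
	.tlsdesccall var
	blr	x1

   .tlsdesccall places BFD_RELOC_AARCH64_TLSDESC_CALL on the following
   blr; .tlsdescadd and .tlsdescldr tag an add and ldr in the same way,
   allowing the linker to recognize and relax the whole sequence.  */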
1949 #endif	/* OBJ_ELF */
1950 
1951 static void s_aarch64_arch (int);
1952 static void s_aarch64_cpu (int);
1953 static void s_aarch64_arch_extension (int);
1954 
1955 /* This table describes all the machine specific pseudo-ops the assembler
1956    has to support.  The fields are:
1957      pseudo-op name without dot
1958      function to call to execute this pseudo-op
1959      Integer arg to pass to the function.  */
1960 
1961 const pseudo_typeS md_pseudo_table[] = {
1962   /* Never called because '.req' does not start a line.  */
1963   {"req", s_req, 0},
1964   {"unreq", s_unreq, 0},
1965   {"bss", s_bss, 0},
1966   {"even", s_even, 0},
1967   {"ltorg", s_ltorg, 0},
1968   {"pool", s_ltorg, 0},
1969   {"cpu", s_aarch64_cpu, 0},
1970   {"arch", s_aarch64_arch, 0},
1971   {"arch_extension", s_aarch64_arch_extension, 0},
1972   {"inst", s_aarch64_inst, 0},
1973 #ifdef OBJ_ELF
1974   {"tlsdescadd", s_tlsdescadd, 0},
1975   {"tlsdesccall", s_tlsdesccall, 0},
1976   {"tlsdescldr", s_tlsdescldr, 0},
1977   {"word", s_aarch64_elf_cons, 4},
1978   {"long", s_aarch64_elf_cons, 4},
1979   {"xword", s_aarch64_elf_cons, 8},
1980   {"dword", s_aarch64_elf_cons, 8},
1981 #endif
1982   {0, 0, 0}
1983 };
1984 
1985 
1986 /* Check whether STR points to a register name followed by a comma or the
1987    end of line; REG_TYPE indicates which register types are checked
1988    against.  Return TRUE if STR is such a register name; otherwise return
1989    FALSE.  The function does not intend to produce any diagnostics, but since
1990    the register parser aarch64_reg_parse, which is called by this function,
1991    does produce diagnostics, we call clear_error to clear any diagnostics
1992    that may be generated by aarch64_reg_parse.
1993    Also, the function returns FALSE directly if there is any user error
1994    present at the function entry.  This prevents the existing diagnostics
1995    state from being spoiled.
1996    The function currently serves parse_constant_immediate and
1997    parse_big_immediate only.  */
1998 static bfd_boolean
1999 reg_name_p (char *str, aarch64_reg_type reg_type)
2000 {
2001   int reg;
2002 
2003   /* Prevent the diagnostics state from being spoiled.  */
2004   if (error_p ())
2005     return FALSE;
2006 
2007   reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2008 
2009   /* Clear the parsing error that may be set by the reg parser.  */
2010   clear_error ();
2011 
2012   if (reg == PARSE_FAIL)
2013     return FALSE;
2014 
2015   skip_whitespace (str);
2016   if (*str == ',' || is_end_of_line[(unsigned int) *str])
2017     return TRUE;
2018 
2019   return FALSE;
2020 }
2021 
2022 /* Parser functions used exclusively in instruction operands.  */
2023 
2024 /* Parse an immediate expression which may not be constant.
2025 
2026    To prevent the expression parser from pushing a register name
2027    into the symbol table as an undefined symbol, a check is first
2028    made to determine whether STR is a valid register name followed
2029    by a comma or the end of line.  Return FALSE if STR is such a
2030    string.  */
2031 
2032 static bfd_boolean
2033 parse_immediate_expression (char **str, expressionS *exp)
2034 {
2035   if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
2036     {
2037       set_recoverable_error (_("immediate operand required"));
2038       return FALSE;
2039     }
2040 
2041   my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2042 
2043   if (exp->X_op == O_absent)
2044     {
2045       set_fatal_syntax_error (_("missing immediate expression"));
2046       return FALSE;
2047     }
2048 
2049   return TRUE;
2050 }
2051 
2052 /* Constant immediate-value read function for use in insn parsing.
2053    STR points to the beginning of the immediate (with the optional
2054    leading #); *VAL receives the value.
2055 
2056    Return TRUE on success; otherwise return FALSE.  */
2057 
2058 static bfd_boolean
2059 parse_constant_immediate (char **str, int64_t * val)
2060 {
2061   expressionS exp;
2062 
2063   if (! parse_immediate_expression (str, &exp))
2064     return FALSE;
2065 
2066   if (exp.X_op != O_constant)
2067     {
2068       set_syntax_error (_("constant expression required"));
2069       return FALSE;
2070     }
2071 
2072   *val = exp.X_add_number;
2073   return TRUE;
2074 }
2075 
2076 static uint32_t
2077 encode_imm_float_bits (uint32_t imm)
2078 {
2079   return ((imm >> 19) & 0x7f)	/* b[25:19] -> b[6:0] */
2080     | ((imm >> (31 - 7)) & 0x80);	/* b[31]    -> b[7]   */
2081 }
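
/* Worked example: 1.0f has the IEEE754 single-precision encoding
   0x3f800000; bits [25:19] are 0b1110000 and bit [31] is 0, so the
   function above returns the 8-bit immediate 0x70.  */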
2082 
2083 /* Return TRUE if the single-precision floating-point value encoded in IMM
2084    can be expressed in the AArch64 8-bit signed floating-point format with
2085    3-bit exponent and normalized 4 bits of precision; in other words, the
2086    floating-point value must be expressible as
2087      (+/-) n / 16 * power (2, r)
2088    where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.  */
2089 
2090 static bfd_boolean
2091 aarch64_imm_float_p (uint32_t imm)
2092 {
2093   /* If a single-precision floating-point value has the following bit
2094      pattern, it can be expressed in the AArch64 8-bit floating-point
2095      format:
2096 
2097      3 32222222 2221111111111
2098      1 09876543 21098765432109876543210
2099      n Eeeeeexx xxxx0000000000000000000
2100 
2101      where n, e and each x are either 0 or 1 independently, with
2102      E == ~ e.  */
2103 
2104   uint32_t pattern;
2105 
2106   /* Prepare the pattern for 'Eeeeee'.  */
2107   if (((imm >> 30) & 0x1) == 0)
2108     pattern = 0x3e000000;
2109   else
2110     pattern = 0x40000000;
2111 
2112   return (imm & 0x7ffff) == 0		/* lower 19 bits are 0.  */
2113     && ((imm & 0x7e000000) == pattern);	/* bits 25 - 29 == ~ bit 30.  */
2114 }
2115 
2116 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2117 
2118    Return TRUE if the value encoded in IMM can be expressed in the AArch64
2119    8-bit signed floating-point format with 3-bit exponent and normalized 4
2120    bits of precision (i.e. can be used in an FMOV instruction); return the
2121    equivalent single-precision encoding in *FPWORD.
2122 
2123    Otherwise return FALSE.  */
2124 
2125 static bfd_boolean
2126 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2127 {
2128   /* If a double-precision floating-point value has the following bit
2129      pattern, it can be expressed in the AArch64 8-bit floating-point
2130      format:
2131 
2132      6 66655555555 554444444...21111111111
2133      3 21098765432 109876543...098765432109876543210
2134      n Eeeeeeeeexx xxxx00000...000000000000000000000
2135 
2136      where n, e and each x are either 0 or 1 independently, with
2137      E == ~ e.  */
2138 
2139   uint32_t pattern;
2140   uint32_t high32 = imm >> 32;
2141 
2142   /* Lower 32 bits need to be 0s.  */
2143   if ((imm & 0xffffffff) != 0)
2144     return FALSE;
2145 
2146   /* Prepare the pattern for 'Eeeeeeeee'.  */
2147   if (((high32 >> 30) & 0x1) == 0)
2148     pattern = 0x3fc00000;
2149   else
2150     pattern = 0x40000000;
2151 
2152   if ((high32 & 0xffff) == 0			/* bits 32 - 47 are 0.  */
2153       && (high32 & 0x7fc00000) == pattern)	/* bits 54 - 61 == ~ bit 62.  */
2154     {
2155       /* Convert to the single-precision encoding.
2156          i.e. convert
2157 	   n Eeeeeeeeexx xxxx00000...000000000000000000000
2158 	 to
2159 	   n Eeeeeexx xxxx0000000000000000000.  */
2160       *fpword = ((high32 & 0xfe000000)			/* nEeeeee.  */
2161 		 | (((high32 >> 16) & 0x3f) << 19));	/* xxxxxx.  */
2162       return TRUE;
2163     }
2164   else
2165     return FALSE;
2166 }
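
/* Worked example: 1.0 has the IEEE754 double-precision encoding
   0x3ff0000000000000.  The low 32 bits are zero and the high word
   0x3ff00000 matches the pattern above, so *FPWORD is set to
   (0x3ff00000 & 0xfe000000) | (((0x3ff00000 >> 16) & 0x3f) << 19)
   = 0x3f800000, i.e. the single-precision encoding of 1.0.  */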
2167 
2168 /* Parse a floating-point immediate.  Return TRUE on success and return the
2169    value in *IMMED in the format of IEEE754 single-precision encoding.
2170    *CCP points to the start of the string; DP_P is TRUE when the immediate
2171    is expected to be in double-precision (N.B. this only matters when
2172    hexadecimal representation is involved).
2173 
2174    N.B. 0.0 is accepted by this function.  */
2175 
2176 static bfd_boolean
2177 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2178 {
2179   char *str = *ccp;
2180   char *fpnum;
2181   LITTLENUM_TYPE words[MAX_LITTLENUMS];
2182   int found_fpchar = 0;
2183   int64_t val = 0;
2184   unsigned fpword = 0;
2185   bfd_boolean hex_p = FALSE;
2186 
2187   skip_past_char (&str, '#');
2188 
2189   fpnum = str;
2190   skip_whitespace (fpnum);
2191 
2192   if (strncmp (fpnum, "0x", 2) == 0)
2193     {
2194       /* Support the hexadecimal representation of the IEEE754 encoding.
2195 	 Double-precision is expected when DP_P is TRUE, otherwise the
2196 	 representation should be in single-precision.  */
2197       if (! parse_constant_immediate (&str, &val))
2198 	goto invalid_fp;
2199 
2200       if (dp_p)
2201 	{
2202 	  if (! aarch64_double_precision_fmovable (val, &fpword))
2203 	    goto invalid_fp;
2204 	}
2205       else if ((uint64_t) val > 0xffffffff)
2206 	goto invalid_fp;
2207       else
2208 	fpword = val;
2209 
2210       hex_p = TRUE;
2211     }
2212   else
2213     {
2214       /* We must not accidentally parse an integer as a floating-point number.
2215 	 Make sure that the value we parse is not an integer by checking for
2216 	 special characters '.' or 'e'.  */
2217       for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2218 	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2219 	  {
2220 	    found_fpchar = 1;
2221 	    break;
2222 	  }
2223 
2224       if (!found_fpchar)
2225 	return FALSE;
2226     }
2227 
2228   if (! hex_p)
2229     {
2230       int i;
2231 
2232       if ((str = atof_ieee (str, 's', words)) == NULL)
2233 	goto invalid_fp;
2234 
2235       /* Our FP word must be 32 bits (single-precision FP).  */
2236       for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2237 	{
2238 	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
2239 	  fpword |= words[i];
2240 	}
2241     }
2242 
2243   if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2244     {
2245       *immed = fpword;
2246       *ccp = str;
2247       return TRUE;
2248     }
2249 
2250 invalid_fp:
2251   set_fatal_syntax_error (_("invalid floating-point constant"));
2252   return FALSE;
2253 }
2254 
2255 /* Less-generic immediate-value read function with the possibility of loading
2256    a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2257    instructions.
2258 
2259    To prevent the expression parser from pushing a register name into the
2260    symbol table as an undefined symbol, a check is first made to determine
2261    whether STR is a valid register name followed by a comma or the end
2262    of line.  Return FALSE if STR is such a register.  */
2263 
2264 static bfd_boolean
2265 parse_big_immediate (char **str, int64_t *imm)
2266 {
2267   char *ptr = *str;
2268 
2269   if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2270     {
2271       set_syntax_error (_("immediate operand required"));
2272       return FALSE;
2273     }
2274 
2275   my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2276 
2277   if (inst.reloc.exp.X_op == O_constant)
2278     *imm = inst.reloc.exp.X_add_number;
2279 
2280   *str = ptr;
2281 
2282   return TRUE;
2283 }
2284 
2285 /* Record in *RELOC that *OPERAND needs a GAS internal fixup.
2286    If NEED_LIBOPCODES_P is non-zero, the fixup will need
2287    assistance from libopcodes.  */
2288 
2289 static inline void
2290 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2291 				const aarch64_opnd_info *operand,
2292 				int need_libopcodes_p)
2293 {
2294   reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2295   reloc->opnd = operand->type;
2296   if (need_libopcodes_p)
2297     reloc->need_libopcodes_p = 1;
2298 }
2299 
2300 /* Return TRUE if the instruction needs to be fixed up later internally by
2301    the GAS; otherwise return FALSE.  */
2302 
2303 static inline bfd_boolean
2304 aarch64_gas_internal_fixup_p (void)
2305 {
2306   return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2307 }
2308 
2309 /* Assign the immediate value to the relevant field in *OPERAND if
2310    RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2311    needs an internal fixup in a later stage.
2312    ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2313    IMM.VALUE that may get assigned with the constant.  */
2314 static inline void
2315 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2316 				    aarch64_opnd_info *operand,
2317 				    int addr_off_p,
2318 				    int need_libopcodes_p,
2319 				    int skip_p)
2320 {
2321   if (reloc->exp.X_op == O_constant)
2322     {
2323       if (addr_off_p)
2324 	operand->addr.offset.imm = reloc->exp.X_add_number;
2325       else
2326 	operand->imm.value = reloc->exp.X_add_number;
2327       reloc->type = BFD_RELOC_UNUSED;
2328     }
2329   else
2330     {
2331       aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2332       /* Tell libopcodes to ignore this operand or not.  This is helpful
2333 	 when one of the operands needs to be fixed up later but we need
2334 	 libopcodes to check the other operands.  */
2335       operand->skip = skip_p;
2336     }
2337 }
2338 
2339 /* Relocation modifiers.  Each entry in the table contains the textual
2340    name for the relocation which may be placed before a symbol used as
2341    a load/store offset, or add immediate. It must be surrounded by a
2342    leading and trailing colon, for example:
2343 
2344 	ldr	x0, [x1, #:rello:varsym]
2345 	add	x0, x1, #:rello:varsym  */
2346 
2347 struct reloc_table_entry
2348 {
2349   const char *name;
2350   int pc_rel;
2351   bfd_reloc_code_real_type adr_type;
2352   bfd_reloc_code_real_type adrp_type;
2353   bfd_reloc_code_real_type movw_type;
2354   bfd_reloc_code_real_type add_type;
2355   bfd_reloc_code_real_type ldst_type;
2356   bfd_reloc_code_real_type ld_literal_type;
2357 };
2358 
2359 static struct reloc_table_entry reloc_table[] = {
2360   /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2361   {"lo12", 0,
2362    0,				/* adr_type */
2363    0,
2364    0,
2365    BFD_RELOC_AARCH64_ADD_LO12,
2366    BFD_RELOC_AARCH64_LDST_LO12,
2367    0},
2368 
2369   /* Higher 21 bits of pc-relative page offset: ADRP */
2370   {"pg_hi21", 1,
2371    0,				/* adr_type */
2372    BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2373    0,
2374    0,
2375    0,
2376    0},
2377 
2378   /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2379   {"pg_hi21_nc", 1,
2380    0,				/* adr_type */
2381    BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2382    0,
2383    0,
2384    0,
2385    0},
2386 
2387   /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2388   {"abs_g0", 0,
2389    0,				/* adr_type */
2390    0,
2391    BFD_RELOC_AARCH64_MOVW_G0,
2392    0,
2393    0,
2394    0},
2395 
2396   /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2397   {"abs_g0_s", 0,
2398    0,				/* adr_type */
2399    0,
2400    BFD_RELOC_AARCH64_MOVW_G0_S,
2401    0,
2402    0,
2403    0},
2404 
2405   /* Less significant bits 0-15 of address/value: MOVK, no check */
2406   {"abs_g0_nc", 0,
2407    0,				/* adr_type */
2408    0,
2409    BFD_RELOC_AARCH64_MOVW_G0_NC,
2410    0,
2411    0,
2412    0},
2413 
2414   /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2415   {"abs_g1", 0,
2416    0,				/* adr_type */
2417    0,
2418    BFD_RELOC_AARCH64_MOVW_G1,
2419    0,
2420    0,
2421    0},
2422 
2423   /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2424   {"abs_g1_s", 0,
2425    0,				/* adr_type */
2426    0,
2427    BFD_RELOC_AARCH64_MOVW_G1_S,
2428    0,
2429    0,
2430    0},
2431 
2432   /* Less significant bits 16-31 of address/value: MOVK, no check */
2433   {"abs_g1_nc", 0,
2434    0,				/* adr_type */
2435    0,
2436    BFD_RELOC_AARCH64_MOVW_G1_NC,
2437    0,
2438    0,
2439    0},
2440 
2441   /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2442   {"abs_g2", 0,
2443    0,				/* adr_type */
2444    0,
2445    BFD_RELOC_AARCH64_MOVW_G2,
2446    0,
2447    0,
2448    0},
2449 
2450   /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2451   {"abs_g2_s", 0,
2452    0,				/* adr_type */
2453    0,
2454    BFD_RELOC_AARCH64_MOVW_G2_S,
2455    0,
2456    0,
2457    0},
2458 
2459   /* Less significant bits 32-47 of address/value: MOVK, no check */
2460   {"abs_g2_nc", 0,
2461    0,				/* adr_type */
2462    0,
2463    BFD_RELOC_AARCH64_MOVW_G2_NC,
2464    0,
2465    0,
2466    0},
2467 
2468   /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2469   {"abs_g3", 0,
2470    0,				/* adr_type */
2471    0,
2472    BFD_RELOC_AARCH64_MOVW_G3,
2473    0,
2474    0,
2475    0},
2476 
2477   /* Get to the page containing GOT entry for a symbol.  */
2478   {"got", 1,
2479    0,				/* adr_type */
2480    BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2481    0,
2482    0,
2483    0,
2484    BFD_RELOC_AARCH64_GOT_LD_PREL19},
2485 
2486   /* 12 bit offset into the page containing GOT entry for that symbol.  */
2487   {"got_lo12", 0,
2488    0,				/* adr_type */
2489    0,
2490    0,
2491    0,
2492    BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2493    0},
2494 
2495   /* 0-15 bits of address/value: MOVk, no check.  */
2496   {"gotoff_g0_nc", 0,
2497    0,				/* adr_type */
2498    0,
2499    BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2500    0,
2501    0,
2502    0},
2503 
2504   /* Most significant bits 16-31 of address/value: MOVZ.  */
2505   {"gotoff_g1", 0,
2506    0,				/* adr_type */
2507    0,
2508    BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2509    0,
2510    0,
2511    0},
2512 
2513   /* 15 bit offset into the page containing GOT entry for that symbol.  */
2514   {"gotoff_lo15", 0,
2515    0,				/* adr_type */
2516    0,
2517    0,
2518    0,
2519    BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2520    0},
2521 
2522   /* Get to the page containing GOT TLS entry for a symbol */
2523   {"gottprel_g0_nc", 0,
2524    0,				/* adr_type */
2525    0,
2526    BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2527    0,
2528    0,
2529    0},
2530 
2531   /* Get to the page containing GOT TLS entry for a symbol */
2532   {"gottprel_g1", 0,
2533    0,				/* adr_type */
2534    0,
2535    BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2536    0,
2537    0,
2538    0},
2539 
2540   /* Get to the page containing GOT TLS entry for a symbol */
2541   {"tlsgd", 0,
2542    BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2543    BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2544    0,
2545    0,
2546    0,
2547    0},
2548 
2549   /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2550   {"tlsgd_lo12", 0,
2551    0,				/* adr_type */
2552    0,
2553    0,
2554    BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2555    0,
2556    0},
2557 
2558   /* Lower 16 bits address/value: MOVk.  */
2559   {"tlsgd_g0_nc", 0,
2560    0,				/* adr_type */
2561    0,
2562    BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2563    0,
2564    0,
2565    0},
2566 
2567   /* Most significant bits 16-31 of address/value: MOVZ.  */
2568   {"tlsgd_g1", 0,
2569    0,				/* adr_type */
2570    0,
2571    BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2572    0,
2573    0,
2574    0},
2575 
2576   /* Get to the page containing GOT TLS entry for a symbol */
2577   {"tlsdesc", 0,
2578    BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2579    BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2580    0,
2581    0,
2582    0,
2583    BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2584 
2585   /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2586   {"tlsdesc_lo12", 0,
2587    0,				/* adr_type */
2588    0,
2589    0,
2590    BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2591    BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2592    0},
2593 
2594   /* Get to the page containing GOT TLS entry for a symbol.
2595      As with GD, we allocate two consecutive GOT slots for the
2596      module index and module offset; the only difference from GD
2597      is that the module offset should be initialized to zero
2598      without any outstanding runtime relocation. */
2599   {"tlsldm", 0,
2600    BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2601    BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2602    0,
2603    0,
2604    0,
2605    0},
2606 
2607   /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2608   {"tlsldm_lo12_nc", 0,
2609    0,				/* adr_type */
2610    0,
2611    0,
2612    BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2613    0,
2614    0},
2615 
2616   /* 12 bit offset into the module TLS base address.  */
2617   {"dtprel_lo12", 0,
2618    0,				/* adr_type */
2619    0,
2620    0,
2621    BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2622    BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2623    0},
2624 
2625   /* Same as dtprel_lo12, no overflow check.  */
2626   {"dtprel_lo12_nc", 0,
2627    0,				/* adr_type */
2628    0,
2629    0,
2630    BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2631    BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2632    0},
2633 
2634   /* bits[23:12] of offset to the module TLS base address.  */
2635   {"dtprel_hi12", 0,
2636    0,				/* adr_type */
2637    0,
2638    0,
2639    BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2640    0,
2641    0},
2642 
2643   /* bits[15:0] of offset to the module TLS base address.  */
2644   {"dtprel_g0", 0,
2645    0,				/* adr_type */
2646    0,
2647    BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2648    0,
2649    0,
2650    0},
2651 
2652   /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
2653   {"dtprel_g0_nc", 0,
2654    0,				/* adr_type */
2655    0,
2656    BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2657    0,
2658    0,
2659    0},
2660 
2661   /* bits[31:16] of offset to the module TLS base address.  */
2662   {"dtprel_g1", 0,
2663    0,				/* adr_type */
2664    0,
2665    BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2666    0,
2667    0,
2668    0},
2669 
2670   /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
2671   {"dtprel_g1_nc", 0,
2672    0,				/* adr_type */
2673    0,
2674    BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2675    0,
2676    0,
2677    0},
2678 
2679   /* bits[47:32] of offset to the module TLS base address.  */
2680   {"dtprel_g2", 0,
2681    0,				/* adr_type */
2682    0,
2683    BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2684    0,
2685    0,
2686    0},
2687 
2688   /* Lower 16 bit offset into GOT entry for a symbol */
2689   {"tlsdesc_off_g0_nc", 0,
2690    0,				/* adr_type */
2691    0,
2692    BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2693    0,
2694    0,
2695    0},
2696 
2697   /* Higher 16 bit offset into GOT entry for a symbol */
2698   {"tlsdesc_off_g1", 0,
2699    0,				/* adr_type */
2700    0,
2701    BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2702    0,
2703    0,
2704    0},
2705 
2706   /* Get to the page containing GOT TLS entry for a symbol */
2707   {"gottprel", 0,
2708    0,				/* adr_type */
2709    BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2710    0,
2711    0,
2712    0,
2713    BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2714 
2715   /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2716   {"gottprel_lo12", 0,
2717    0,				/* adr_type */
2718    0,
2719    0,
2720    0,
2721    BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2722    0},
2723 
2724   /* Get tp offset for a symbol.  */
2725   {"tprel", 0,
2726    0,				/* adr_type */
2727    0,
2728    0,
2729    BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2730    0,
2731    0},
2732 
2733   /* Get tp offset for a symbol.  */
2734   {"tprel_lo12", 0,
2735    0,				/* adr_type */
2736    0,
2737    0,
2738    BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2739    0,
2740    0},
2741 
2742   /* Get tp offset for a symbol.  */
2743   {"tprel_hi12", 0,
2744    0,				/* adr_type */
2745    0,
2746    0,
2747    BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2748    0,
2749    0},
2750 
2751   /* Get tp offset for a symbol.  */
2752   {"tprel_lo12_nc", 0,
2753    0,				/* adr_type */
2754    0,
2755    0,
2756    BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2757    0,
2758    0},
2759 
2760   /* Most significant bits 32-47 of address/value: MOVZ.  */
2761   {"tprel_g2", 0,
2762    0,				/* adr_type */
2763    0,
2764    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2765    0,
2766    0,
2767    0},
2768 
2769   /* Most significant bits 16-31 of address/value: MOVZ.  */
2770   {"tprel_g1", 0,
2771    0,				/* adr_type */
2772    0,
2773    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2774    0,
2775    0,
2776    0},
2777 
2778   /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
2779   {"tprel_g1_nc", 0,
2780    0,				/* adr_type */
2781    0,
2782    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2783    0,
2784    0,
2785    0},
2786 
2787   /* Most significant bits 0-15 of address/value: MOVZ.  */
2788   {"tprel_g0", 0,
2789    0,				/* adr_type */
2790    0,
2791    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2792    0,
2793    0,
2794    0},
2795 
2796   /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
2797   {"tprel_g0_nc", 0,
2798    0,				/* adr_type */
2799    0,
2800    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2801    0,
2802    0,
2803    0},
2804 
2805   /* 15bit offset from got entry to base address of GOT table.  */
2806   {"gotpage_lo15", 0,
2807    0,
2808    0,
2809    0,
2810    0,
2811    BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2812    0},
2813 
2814   /* 14bit offset from got entry to base address of GOT table.  */
2815   {"gotpage_lo14", 0,
2816    0,
2817    0,
2818    0,
2819    0,
2820    BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2821    0},
2822 };
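
/* For instance (a sketch of typical use, not taken from the original
   source), a GOT-indirect load of the address of "var" selects the
   adrp_type and ldst_type columns of the "got" and "got_lo12" entries:

	adrp	x0, :got:var		  // BFD_RELOC_AARCH64_ADR_GOT_PAGE
	ldr	x0, [x0, #:got_lo12:var]  // BFD_RELOC_AARCH64_LD_GOT_LO12_NC

   while a plain PC-relative address uses ADRP together with the add_type
   column of "lo12":

	adrp	x0, var
	add	x0, x0, #:lo12:var	  // BFD_RELOC_AARCH64_ADD_LO12  */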
2823 
2824 /* Given the address of a pointer pointing to the textual name of a
2825    relocation as may appear in assembler source, attempt to find its
2826    details in reloc_table.  The pointer will be updated to the character
2827    after the trailing colon.  On failure, NULL will be returned;
2828    otherwise return the reloc_table_entry.  */
2829 
2830 static struct reloc_table_entry *
2831 find_reloc_table_entry (char **str)
2832 {
2833   unsigned int i;
2834   for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2835     {
2836       int length = strlen (reloc_table[i].name);
2837 
2838       if (strncasecmp (reloc_table[i].name, *str, length) == 0
2839 	  && (*str)[length] == ':')
2840 	{
2841 	  *str += (length + 1);
2842 	  return &reloc_table[i];
2843 	}
2844     }
2845 
2846   return NULL;
2847 }
2848 
2849 /* Mode argument to parse_shift and parse_shifter_operand.  */
2850 enum parse_shift_mode
2851 {
2852   SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2853 				   "#imm{,lsl #n}"  */
2854   SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
2855 				   "#imm"  */
2856   SHIFTED_LSL,			/* bare "lsl #n"  */
2857   SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
2858   SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
2859 };
2860 
2861 /* Parse a <shift> operator on an AArch64 data processing instruction.
2862    Return TRUE on success; otherwise return FALSE.  */
2863 static bfd_boolean
2864 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2865 {
2866   const struct aarch64_name_value_pair *shift_op;
2867   enum aarch64_modifier_kind kind;
2868   expressionS exp;
2869   int exp_has_prefix;
2870   char *s = *str;
2871   char *p = s;
2872 
2873   for (p = *str; ISALPHA (*p); p++)
2874     ;
2875 
2876   if (p == *str)
2877     {
2878       set_syntax_error (_("shift expression expected"));
2879       return FALSE;
2880     }
2881 
2882   shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2883 
2884   if (shift_op == NULL)
2885     {
2886       set_syntax_error (_("shift operator expected"));
2887       return FALSE;
2888     }
2889 
2890   kind = aarch64_get_operand_modifier (shift_op);
2891 
2892   if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2893     {
2894       set_syntax_error (_("invalid use of 'MSL'"));
2895       return FALSE;
2896     }
2897 
2898   switch (mode)
2899     {
2900     case SHIFTED_LOGIC_IMM:
2901       if (aarch64_extend_operator_p (kind) == TRUE)
2902 	{
2903 	  set_syntax_error (_("extending shift is not permitted"));
2904 	  return FALSE;
2905 	}
2906       break;
2907 
2908     case SHIFTED_ARITH_IMM:
2909       if (kind == AARCH64_MOD_ROR)
2910 	{
2911 	  set_syntax_error (_("'ROR' shift is not permitted"));
2912 	  return FALSE;
2913 	}
2914       break;
2915 
2916     case SHIFTED_LSL:
2917       if (kind != AARCH64_MOD_LSL)
2918 	{
2919 	  set_syntax_error (_("only 'LSL' shift is permitted"));
2920 	  return FALSE;
2921 	}
2922       break;
2923 
2924     case SHIFTED_REG_OFFSET:
2925       if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2926 	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2927 	{
2928 	  set_fatal_syntax_error
2929 	    (_("invalid shift for the register offset addressing mode"));
2930 	  return FALSE;
2931 	}
2932       break;
2933 
2934     case SHIFTED_LSL_MSL:
2935       if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2936 	{
2937 	  set_syntax_error (_("invalid shift operator"));
2938 	  return FALSE;
2939 	}
2940       break;
2941 
2942     default:
2943       abort ();
2944     }
2945 
2946   /* Whitespace can appear here if the next thing is a bare digit.  */
2947   skip_whitespace (p);
2948 
2949   /* Parse shift amount.  */
2950   exp_has_prefix = 0;
2951   if (mode == SHIFTED_REG_OFFSET && *p == ']')
2952     exp.X_op = O_absent;
2953   else
2954     {
2955       if (is_immediate_prefix (*p))
2956 	{
2957 	  p++;
2958 	  exp_has_prefix = 1;
2959 	}
2960       my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2961     }
2962   if (exp.X_op == O_absent)
2963     {
2964       if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2965 	{
2966 	  set_syntax_error (_("missing shift amount"));
2967 	  return FALSE;
2968 	}
2969       operand->shifter.amount = 0;
2970     }
2971   else if (exp.X_op != O_constant)
2972     {
2973       set_syntax_error (_("constant shift amount required"));
2974       return FALSE;
2975     }
2976   else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2977     {
2978       set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2979       return FALSE;
2980     }
2981   else
2982     {
2983       operand->shifter.amount = exp.X_add_number;
2984       operand->shifter.amount_present = 1;
2985     }
2986 
2987   operand->shifter.operator_present = 1;
2988   operand->shifter.kind = kind;
2989 
2990   *str = p;
2991   return TRUE;
2992 }
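
/* For example (a sketch): parsing the tail of

	add	x0, x1, x2, lsl #3

   sets .shifter.kind = AARCH64_MOD_LSL and .shifter.amount = 3, while

	ldr	x0, [x1, w2, sxtw #2]

   is parsed with SHIFTED_REG_OFFSET and yields AARCH64_MOD_SXTW with
   amount 2.  */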
2993 
2994 /* Parse a <shifter_operand> for a data processing instruction:
2995 
2996       #<immediate>
2997       #<immediate>, LSL #imm
2998 
2999    Validation of immediate operands is deferred to md_apply_fix.
3000 
3001    Return TRUE on success; otherwise return FALSE.  */
3002 
3003 static bfd_boolean
3004 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3005 			   enum parse_shift_mode mode)
3006 {
3007   char *p;
3008 
3009   if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3010     return FALSE;
3011 
3012   p = *str;
3013 
3014   /* Accept an immediate expression.  */
3015   if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3016     return FALSE;
3017 
3018   /* Accept optional LSL for arithmetic immediate values.  */
3019   if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3020     if (! parse_shift (&p, operand, SHIFTED_LSL))
3021       return FALSE;
3022 
3023   /* Do not accept any shifter for logical immediate values.  */
3024   if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3025       && parse_shift (&p, operand, mode))
3026     {
3027       set_syntax_error (_("unexpected shift operator"));
3028       return FALSE;
3029     }
3030 
3031   *str = p;
3032   return TRUE;
3033 }
3034 
3035 /* Parse a <shifter_operand> for a data processing instruction:
3036 
3037       <Rm>
3038       <Rm>, <shift>
3039       #<immediate>
3040       #<immediate>, LSL #imm
3041 
3042    where <shift> is handled by parse_shift above, and the last two
3043    cases are handled by the function above.
3044 
3045    Validation of immediate operands is deferred to md_apply_fix.
3046 
3047    Return TRUE on success; otherwise return FALSE.  */
3048 
3049 static bfd_boolean
3050 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3051 		       enum parse_shift_mode mode)
3052 {
3053   int reg;
3054   int isreg32, isregzero;
3055   enum aarch64_operand_class opd_class
3056     = aarch64_get_operand_class (operand->type);
3057 
3058   if ((reg =
3059        aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
3060     {
3061       if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3062 	{
3063 	  set_syntax_error (_("unexpected register in the immediate operand"));
3064 	  return FALSE;
3065 	}
3066 
3067       if (!isregzero && reg == REG_SP)
3068 	{
3069 	  set_syntax_error (BAD_SP);
3070 	  return FALSE;
3071 	}
3072 
3073       operand->reg.regno = reg;
3074       operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
3075 
3076       /* Accept optional shift operation on register.  */
3077       if (! skip_past_comma (str))
3078 	return TRUE;
3079 
3080       if (! parse_shift (str, operand, mode))
3081 	return FALSE;
3082 
3083       return TRUE;
3084     }
3085   else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3086     {
3087       set_syntax_error
3088 	(_("integer register expected in the extended/shifted operand "
3089 	   "register"));
3090       return FALSE;
3091     }
3092 
3093   /* We have a shifted immediate variable.  */
3094   return parse_shifter_operand_imm (str, operand, mode);
3095 }
3096 
3097 /* Return TRUE on success; return FALSE otherwise.  */
3098 
3099 static bfd_boolean
3100 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3101 			     enum parse_shift_mode mode)
3102 {
3103   char *p = *str;
3104 
3105   /* Determine if we have the sequence of characters #: or just :
3106      coming next.  If we do, then we check for a :rello: relocation
3107      modifier.  If we don't, punt the whole lot to
3108      parse_shifter_operand.  */
3109 
3110   if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3111     {
3112       struct reloc_table_entry *entry;
3113 
3114       if (p[0] == '#')
3115 	p += 2;
3116       else
3117 	p++;
3118       *str = p;
3119 
3120       /* Try to parse a relocation.  Anything else is an error.  */
3121       if (!(entry = find_reloc_table_entry (str)))
3122 	{
3123 	  set_syntax_error (_("unknown relocation modifier"));
3124 	  return FALSE;
3125 	}
3126 
3127       if (entry->add_type == 0)
3128 	{
3129 	  set_syntax_error
3130 	    (_("this relocation modifier is not allowed on this instruction"));
3131 	  return FALSE;
3132 	}
3133 
3134       /* Save str before we decompose it.  */
3135       p = *str;
3136 
3137       /* Next, we parse the expression.  */
3138       if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3139 	return FALSE;
3140 
3141       /* Record the relocation type (use the ADD variant here).  */
3142       inst.reloc.type = entry->add_type;
3143       inst.reloc.pc_rel = entry->pc_rel;
3144 
3145       /* If str is empty, we've reached the end; stop here.  */
3146       if (**str == '\0')
3147 	return TRUE;
3148 
3149       /* Otherwise, we have a shifted reloc modifier, so rewind to
3150          recover the variable name and continue parsing for the shifter.  */
3151       *str = p;
3152       return parse_shifter_operand_imm (str, operand, mode);
3153     }
3154 
3155   return parse_shifter_operand (str, operand, mode);
3156 }
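
/* For example (a sketch): for

	add	x0, x1, #:lo12:var

   the ":lo12:" modifier is looked up in reloc_table, inst.reloc.type is
   set to its add_type (BFD_RELOC_AARCH64_ADD_LO12), and the remaining
   expression "var" is parsed as the shifted-immediate operand.  */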
3157 
3158 /* Parse all forms of an address expression.  Information is written
3159    to *OPERAND and/or inst.reloc.
3160 
3161    The A64 instruction set has the following addressing modes:
3162 
3163    Offset
3164      [base]			// in SIMD ld/st structure
3165      [base{,#0}]		// in ld/st exclusive
3166      [base{,#imm}]
3167      [base,Xm{,LSL #imm}]
3168      [base,Xm,SXTX {#imm}]
3169      [base,Wm,(S|U)XTW {#imm}]
3170    Pre-indexed
3171      [base,#imm]!
3172    Post-indexed
3173      [base],#imm
3174      [base],Xm			// in SIMD ld/st structure
3175    PC-relative (literal)
3176      label
3177      =immediate
3178 
3179    (As a convenience, the notation "=immediate" is permitted in conjunction
3180    with the pc-relative literal load instructions to automatically place an
3181    immediate value or symbolic address in a nearby literal pool and generate
3182    a hidden label which references it.)
3183 
3184    Upon a successful parsing, the address structure in *OPERAND will be
3185    filled in the following way:
3186 
3187      .base_regno = <base>
3188      .offset.is_reg	// 1 if the offset is a register
3189      .offset.imm = <imm>
3190      .offset.regno = <Rm>
3191 
3192    For different addressing modes defined in the A64 ISA:
3193 
3194    Offset
3195      .pcrel=0; .preind=1; .postind=0; .writeback=0
3196    Pre-indexed
3197      .pcrel=0; .preind=1; .postind=0; .writeback=1
3198    Post-indexed
3199      .pcrel=0; .preind=0; .postind=1; .writeback=1
3200    PC-relative (literal)
3201      .pcrel=1; .preind=1; .postind=0; .writeback=0
3202 
3203    The shift/extension information, if any, will be stored in .shifter.
3204 
3205    It is the caller's responsibility to check for addressing modes not
3206    supported by the instruction, and to set inst.reloc.type.  */
3207 
3208 static bfd_boolean
3209 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
3210 		    int accept_reg_post_index)
3211 {
3212   char *p = *str;
3213   int reg;
3214   int isreg32, isregzero;
3215   expressionS *exp = &inst.reloc.exp;
3216 
3217   if (! skip_past_char (&p, '['))
3218     {
3219       /* =immediate or label.  */
3220       operand->addr.pcrel = 1;
3221       operand->addr.preind = 1;
3222 
3223       /* #:<reloc_op>:<symbol>  */
3224       skip_past_char (&p, '#');
3225       if (reloc && skip_past_char (&p, ':'))
3226 	{
3227 	  bfd_reloc_code_real_type ty;
3228 	  struct reloc_table_entry *entry;
3229 
3230 	  /* Try to parse a relocation modifier.  Anything else is
3231 	     an error.  */
3232 	  entry = find_reloc_table_entry (&p);
3233 	  if (! entry)
3234 	    {
3235 	      set_syntax_error (_("unknown relocation modifier"));
3236 	      return FALSE;
3237 	    }
3238 
3239 	  switch (operand->type)
3240 	    {
3241 	    case AARCH64_OPND_ADDR_PCREL21:
3242 	      /* adr */
3243 	      ty = entry->adr_type;
3244 	      break;
3245 
3246 	    default:
3247 	      ty = entry->ld_literal_type;
3248 	      break;
3249 	    }
3250 
3251 	  if (ty == 0)
3252 	    {
3253 	      set_syntax_error
3254 		(_("this relocation modifier is not allowed on this "
3255 		   "instruction"));
3256 	      return FALSE;
3257 	    }
3258 
3259 	  /* #:<reloc_op>:  */
3260 	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3261 	    {
3262 	      set_syntax_error (_("invalid relocation expression"));
3263 	      return FALSE;
3264 	    }
3265 
3266 	  /* #:<reloc_op>:<expr>  */
3267 	  /* Record the relocation type.  */
3268 	  inst.reloc.type = ty;
3269 	  inst.reloc.pc_rel = entry->pc_rel;
3270 	}
3271       else
3272 	{
3273 
3274 	  if (skip_past_char (&p, '='))
3275 	    /* =immediate; need to generate the literal in the literal pool. */
3276 	    inst.gen_lit_pool = 1;
3277 
3278 	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3279 	    {
3280 	      set_syntax_error (_("invalid address"));
3281 	      return FALSE;
3282 	    }
3283 	}
3284 
3285       *str = p;
3286       return TRUE;
3287     }
3288 
3289   /* [ */
3290 
3291   /* Accept SP and reject ZR */
3292   reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
3293   if (reg == PARSE_FAIL || isreg32)
3294     {
3295       set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3296       return FALSE;
3297     }
3298   operand->addr.base_regno = reg;
3299 
3300   /* [Xn */
3301   if (skip_past_comma (&p))
3302     {
3303       /* [Xn, */
3304       operand->addr.preind = 1;
3305 
3306       /* Reject SP and accept ZR */
3307       reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
3308       if (reg != PARSE_FAIL)
3309 	{
3310 	  /* [Xn,Rm  */
3311 	  operand->addr.offset.regno = reg;
3312 	  operand->addr.offset.is_reg = 1;
3313 	  /* Shifted index.  */
3314 	  if (skip_past_comma (&p))
3315 	    {
3316 	      /* [Xn,Rm,  */
3317 	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3318 		/* Use the diagnostics set in parse_shift, so do not set a
3319 		   new error message here.  */
3320 		return FALSE;
3321 	    }
3322 	  /* We only accept:
3323 	     [base,Xm{,LSL #imm}]
3324 	     [base,Xm,SXTX {#imm}]
3325 	     [base,Wm,(S|U)XTW {#imm}]  */
3326 	  if (operand->shifter.kind == AARCH64_MOD_NONE
3327 	      || operand->shifter.kind == AARCH64_MOD_LSL
3328 	      || operand->shifter.kind == AARCH64_MOD_SXTX)
3329 	    {
3330 	      if (isreg32)
3331 		{
3332 		  set_syntax_error (_("invalid use of 32-bit register offset"));
3333 		  return FALSE;
3334 		}
3335 	    }
3336 	  else if (!isreg32)
3337 	    {
3338 	      set_syntax_error (_("invalid use of 64-bit register offset"));
3339 	      return FALSE;
3340 	    }
3341 	}
3342       else
3343 	{
3344 	  /* [Xn,#:<reloc_op>:<symbol>  */
3345 	  skip_past_char (&p, '#');
3346 	  if (reloc && skip_past_char (&p, ':'))
3347 	    {
3348 	      struct reloc_table_entry *entry;
3349 
3350 	      /* Try to parse a relocation modifier.  Anything else is
3351 		 an error.  */
3352 	      if (!(entry = find_reloc_table_entry (&p)))
3353 		{
3354 		  set_syntax_error (_("unknown relocation modifier"));
3355 		  return FALSE;
3356 		}
3357 
3358 	      if (entry->ldst_type == 0)
3359 		{
3360 		  set_syntax_error
3361 		    (_("this relocation modifier is not allowed on this "
3362 		       "instruction"));
3363 		  return FALSE;
3364 		}
3365 
3366 	      /* [Xn,#:<reloc_op>:  */
3367 	      /* We now have the group relocation table entry corresponding to
3368 	         the name in the assembler source.  Next, we parse the
3369 	         expression.  */
3370 	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3371 		{
3372 		  set_syntax_error (_("invalid relocation expression"));
3373 		  return FALSE;
3374 		}
3375 
3376 	      /* [Xn,#:<reloc_op>:<expr>  */
3377 	      /* Record the load/store relocation type.  */
3378 	      inst.reloc.type = entry->ldst_type;
3379 	      inst.reloc.pc_rel = entry->pc_rel;
3380 	    }
3381 	  else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3382 	    {
3383 	      set_syntax_error (_("invalid expression in the address"));
3384 	      return FALSE;
3385 	    }
3386 	  /* [Xn,<expr>  */
3387 	}
3388     }
3389 
3390   if (! skip_past_char (&p, ']'))
3391     {
3392       set_syntax_error (_("']' expected"));
3393       return FALSE;
3394     }
3395 
3396   if (skip_past_char (&p, '!'))
3397     {
3398       if (operand->addr.preind && operand->addr.offset.is_reg)
3399 	{
3400 	  set_syntax_error (_("register offset not allowed in pre-indexed "
3401 			      "addressing mode"));
3402 	  return FALSE;
3403 	}
3404       /* [Xn]! */
3405       operand->addr.writeback = 1;
3406     }
3407   else if (skip_past_comma (&p))
3408     {
3409       /* [Xn], */
3410       operand->addr.postind = 1;
3411       operand->addr.writeback = 1;
3412 
3413       if (operand->addr.preind)
3414 	{
3415 	  set_syntax_error (_("cannot combine pre- and post-indexing"));
3416 	  return FALSE;
3417 	}
3418 
3419       if (accept_reg_post_index
3420 	  && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3421 					     &isregzero)) != PARSE_FAIL)
3422 	{
3423 	  /* [Xn],Xm */
3424 	  if (isreg32)
3425 	    {
3426 	      set_syntax_error (_("invalid 32-bit register offset"));
3427 	      return FALSE;
3428 	    }
3429 	  operand->addr.offset.regno = reg;
3430 	  operand->addr.offset.is_reg = 1;
3431 	}
3432       else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3433 	{
3434 	  /* [Xn],#expr */
3435 	  set_syntax_error (_("invalid expression in the address"));
3436 	  return FALSE;
3437 	}
3438     }
3439 
3440   /* If at this point neither .preind nor .postind is set, we have a
3441      bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0].  */
3442   if (operand->addr.preind == 0 && operand->addr.postind == 0)
3443     {
3444       if (operand->addr.writeback)
3445 	{
3446 	  /* Reject [Rn]!   */
3447 	  set_syntax_error (_("missing offset in the pre-indexed address"));
3448 	  return FALSE;
3449 	}
3450       operand->addr.preind = 1;
3451       inst.reloc.exp.X_op = O_constant;
3452       inst.reloc.exp.X_add_number = 0;
3453     }
3454 
3455   *str = p;
3456   return TRUE;
3457 }
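
/* Worked examples (a sketch of the resulting *OPERAND fields, following
   the contract described above):

     [x0, #16]!  ->  .base_regno = 0, offset constant 16,
		     .preind = 1, .writeback = 1
     [x2], x3    ->  .base_regno = 2, .offset.regno = 3, .offset.is_reg = 1,
		     .postind = 1, .writeback = 1  (register post-index)
     [x1]        ->  treated as [x1, #0]: .preind = 1, offset constant 0  */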
3458 
3459 /* Return TRUE on success; otherwise return FALSE.  */
3460 static bfd_boolean
3461 parse_address (char **str, aarch64_opnd_info *operand,
3462 	       int accept_reg_post_index)
3463 {
3464   return parse_address_main (str, operand, 0, accept_reg_post_index);
3465 }
3466 
3467 /* Return TRUE on success; otherwise return FALSE.  */
3468 static bfd_boolean
3469 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3470 {
3471   return parse_address_main (str, operand, 1, 0);
3472 }
3473 
3474 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3475    Return TRUE on success; otherwise return FALSE.  */
3476 static bfd_boolean
3477 parse_half (char **str, int *internal_fixup_p)
3478 {
3479   char *p = *str;
3480 
3481   skip_past_char (&p, '#');
3482 
3483   gas_assert (internal_fixup_p);
3484   *internal_fixup_p = 0;
3485 
3486   if (*p == ':')
3487     {
3488       struct reloc_table_entry *entry;
3489 
3490       /* Try to parse a relocation.  Anything else is an error.  */
3491       ++p;
3492       if (!(entry = find_reloc_table_entry (&p)))
3493 	{
3494 	  set_syntax_error (_("unknown relocation modifier"));
3495 	  return FALSE;
3496 	}
3497 
3498       if (entry->movw_type == 0)
3499 	{
3500 	  set_syntax_error
3501 	    (_("this relocation modifier is not allowed on this instruction"));
3502 	  return FALSE;
3503 	}
3504 
3505       inst.reloc.type = entry->movw_type;
3506     }
3507   else
3508     *internal_fixup_p = 1;
3509 
3510   if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3511     return FALSE;
3512 
3513   *str = p;
3514   return TRUE;
3515 }
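
/* For example (a sketch): in

	movz	x0, #:abs_g1:var

   the modifier selects the movw_type column (BFD_RELOC_AARCH64_MOVW_G1),
   whereas a plain "movz x0, #0x1234" has no modifier and is flagged for a
   GAS-internal fixup via *INTERNAL_FIXUP_P.  */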
3516 
3517 /* Parse an operand for an ADRP instruction:
3518      ADRP <Xd>, <label>
3519    Return TRUE on success; otherwise return FALSE.  */
3520 
3521 static bfd_boolean
3522 parse_adrp (char **str)
3523 {
3524   char *p;
3525 
3526   p = *str;
3527   if (*p == ':')
3528     {
3529       struct reloc_table_entry *entry;
3530 
3531       /* Try to parse a relocation.  Anything else is an error.  */
3532       ++p;
3533       if (!(entry = find_reloc_table_entry (&p)))
3534 	{
3535 	  set_syntax_error (_("unknown relocation modifier"));
3536 	  return FALSE;
3537 	}
3538 
3539       if (entry->adrp_type == 0)
3540 	{
3541 	  set_syntax_error
3542 	    (_("this relocation modifier is not allowed on this instruction"));
3543 	  return FALSE;
3544 	}
3545 
3546       inst.reloc.type = entry->adrp_type;
3547     }
3548   else
3549     inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3550 
3551   inst.reloc.pc_rel = 1;
3552 
3553   if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3554     return FALSE;
3555 
3556   *str = p;
3557   return TRUE;
3558 }
3559 
3560 /* Miscellaneous. */
3561 
3562 /* Parse an option for a preload instruction.  Returns the encoding for the
3563    option, or PARSE_FAIL.  */
3564 
3565 static int
3566 parse_pldop (char **str)
3567 {
3568   char *p, *q;
3569   const struct aarch64_name_value_pair *o;
3570 
3571   p = q = *str;
3572   while (ISALNUM (*q))
3573     q++;
3574 
3575   o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3576   if (!o)
3577     return PARSE_FAIL;
3578 
3579   *str = q;
3580   return o->value;
3581 }
3582 
3583 /* Parse an option for a barrier instruction.  Returns the encoding for the
3584    option, or PARSE_FAIL.  */
3585 
3586 static int
3587 parse_barrier (char **str)
3588 {
3589   char *p, *q;
3590   const asm_barrier_opt *o;
3591 
3592   p = q = *str;
3593   while (ISALPHA (*q))
3594     q++;
3595 
3596   o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3597   if (!o)
3598     return PARSE_FAIL;
3599 
3600   *str = q;
3601   return o->value;
3602 }
3603 
3604 /* Parse an operand for a PSB barrier.  Set *HINT_OPT to the hint-option record
3605    and return 0 if successful.  Otherwise return PARSE_FAIL.  */
3606 
3607 static int
3608 parse_barrier_psb (char **str,
3609 		   const struct aarch64_name_value_pair ** hint_opt)
3610 {
3611   char *p, *q;
3612   const struct aarch64_name_value_pair *o;
3613 
3614   p = q = *str;
3615   while (ISALPHA (*q))
3616     q++;
3617 
3618   o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3619   if (!o)
3620     {
3621       set_fatal_syntax_error
3622 	( _("unknown or missing option to PSB"));
3623       return PARSE_FAIL;
3624     }
3625 
3626   if (o->value != 0x11)
3627     {
3628       /* PSB only accepts option name 'CSYNC'.  */
3629       set_syntax_error
3630 	(_("the specified option is not accepted for PSB"));
3631       return PARSE_FAIL;
3632     }
3633 
3634   *str = q;
3635   *hint_opt = o;
3636   return 0;
3637 }
3638 
3639 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3640    Returns the encoding for the option, or PARSE_FAIL.
3641 
3642    If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3643    implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3644 
3645    If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3646    field, otherwise as a system register.
3647 */
3648 
3649 static int
3650 parse_sys_reg (char **str, struct hash_control *sys_regs,
3651 	       int imple_defined_p, int pstatefield_p)
3652 {
3653   char *p, *q;
3654   char buf[32];
3655   const aarch64_sys_reg *o;
3656   int value;
3657 
3658   p = buf;
3659   for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3660     if (p < buf + 31)
3661       *p++ = TOLOWER (*q);
3662   *p = '\0';
3663   /* Assert that BUF was large enough.  */
3664   gas_assert (p - buf == q - *str);
3665 
3666   o = hash_find (sys_regs, buf);
3667   if (!o)
3668     {
3669       if (!imple_defined_p)
3670 	return PARSE_FAIL;
3671       else
3672 	{
3673 	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	}
    }
  else
    {
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
    }

  *str = q;
  return value;
}

/* Parse a system reg for ic/dc/at/tlbi instructions.  Returns the table entry
   for the option, or NULL.  */

static const aarch64_sys_ins_reg *
parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_ins_reg *o;

  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';

  o = hash_find (sys_ins_regs, buf);
  if (!o)
    return NULL;

  if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
    as_bad (_("selected processor does not support system register "
	      "name '%s'"), buf);

  *str = q;
  return o;
}

#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

#define po_int_reg_or_fail(reject_sp, reject_rz) do {		\
    val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz,	\
                                   &isreg32, &isregzero);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = val;					\
    if (isreg32)						\
      info->qualifier = AARCH64_OPND_QLF_W;			\
    else							\
      info->qualifier = AARCH64_OPND_QLF_X;			\
  } while (0)

#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val))		\
      goto failure;						\
  } while (0)

#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val))		\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
#min " to "#max));						\
	goto failure;						\
      }								\
  } while (0)

#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
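/* Note that po_imm_or_fail stringizes its MIN and MAX arguments, so a use
   such as po_imm_or_fail (0, 63) reports "immediate value out of range
   0 to 63" on failure.  */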

/* encode the 12-bit imm field of Add/sub immediate */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  return imm << 10;
}

/* encode the shift amount field of Add/sub immediate */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  return cnt << 22;
}


/* encode the imm field of Adr instruction */
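/* For example, with imm == 0x12345 the low two bits (0x1) are placed in
   bits [30:29] and the remaining bits (0x48d1) in bits [23:5].  */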
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  return (((imm & 0x3) << 29)	/*  [1:0] -> [30:29] */
	  | ((imm & (0x7ffff << 2)) << 3));	/* [20:2] -> [23:5]  */
}

/* encode the immediate field of Move wide immediate */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  return imm << 5;
}

/* encode the 26-bit offset of unconditional branch */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & ((1 << 26) - 1);
}

/* encode the 19-bit offset of conditional branch and compare & branch */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & ((1 << 19) - 1)) << 5;
}

/* encode the 19-bit offset of ld literal */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & ((1 << 19) - 1)) << 5;
}

/* Encode the 14-bit offset of test & branch.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & ((1 << 14) - 1)) << 5;
}

/* Encode the 16-bit imm field of svc/hvc/smc.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  return imm << 5;
}

/* Reencode add(s) to sub(s), or sub(s) to add(s).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ (1 << 30);
}

static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | (1 << 30);
}

static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~(1 << 30);
}

/* Overall per-instruction processing.	*/

/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.	We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.  */

static fixS *
fix_new_aarch64 (fragS * frag,
		 int where,
		 short int size, expressionS * exp, int pc_rel, int reloc)
{
  fixS *new_fix;

  switch (exp->X_op)
    {
    case O_constant:
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
      break;

    default:
      new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
			 pc_rel, reloc);
      break;
    }
  return new_fix;
}

/* Diagnostics on operand errors.  */

/* By default, output a verbose error message.
   Disable the verbose error message with -mno-verbose-error.  */
static int verbose_error_p = 1;

#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */

/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}

/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for diagnostic purposes, as a string copy
   operation is involved, which may affect runtime performance if used
   elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}

static void
reset_aarch64_instruction (aarch64_instruction *instruction)
{
  memset (instruction, '\0', sizeof (aarch64_instruction));
  instruction->reloc.type = BFD_RELOC_UNUSED;
}

/* Data structures storing one user error in the assembly code related to
   the operands.  */

struct operand_error_record
{
  const aarch64_opcode *opcode;
  aarch64_operand_error detail;
  struct operand_error_record *next;
};

typedef struct operand_error_record operand_error_record;

struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated over to find a match for the assembly line.  In this
   data structure, each such opcode will have one operand_error_record
   allocated and inserted.  In other words, excess errors related to a
   single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;

/* Initialize the data structure that stores the operand mismatch
   information on assembling one line of the assembly code.  */
static void
init_operand_error_report (void)
{
  if (operand_error_report.head != NULL)
    {
      gas_assert (operand_error_report.tail != NULL);
      operand_error_report.tail->next = free_opnd_error_record_nodes;
      free_opnd_error_record_nodes = operand_error_report.head;
      operand_error_report.head = NULL;
      operand_error_report.tail = NULL;
      return;
    }
  gas_assert (operand_error_report.tail == NULL);
}

/* Return TRUE if some operand error has been recorded during the
   parsing of the current assembly line using the opcode *OPCODE;
   otherwise return FALSE.  */
static inline bfd_boolean
opcode_has_operand_error_p (const aarch64_opcode *opcode)
{
  operand_error_record *record = operand_error_report.head;
  return record && record->opcode == opcode;
}

/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. there is only one record for each opcode, i.e. at most one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  record->detail = new_record->detail;
}

static inline void
record_operand_error_info (const aarch64_opcode *opcode,
			   aarch64_operand_error *error_info)
{
  operand_error_record record;
  record.opcode = opcode;
  record.detail = *error_info;
  add_operand_error_record (&record);
}

/* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
   error message *ERROR, for operand IDX (counting from 0).  */

static void
record_operand_error (const aarch64_opcode *opcode, int idx,
		      enum aarch64_operand_error_kind kind,
		      const char* error)
{
  aarch64_operand_error info;
  memset(&info, 0, sizeof (info));
  info.index = idx;
  info.kind = kind;
  info.error = error;
  record_operand_error_info (opcode, &info);
}

static void
record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
				enum aarch64_operand_error_kind kind,
				const char* error, const int *extra_data)
{
  aarch64_operand_error info;
  info.index = idx;
  info.kind = kind;
  info.error = error;
  info.data[0] = extra_data[0];
  info.data[1] = extra_data[1];
  info.data[2] = extra_data[2];
  record_operand_error_info (opcode, &info);
}

static void
record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
				   const char* error, int lower_bound,
				   int upper_bound)
{
  int data[3] = {lower_bound, upper_bound, 0};
  record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
				  error, data);
}

/* Remove the operand error record for *OPCODE.  */
static void ATTRIBUTE_UNUSED
remove_operand_error_record (const aarch64_opcode *opcode)
{
  if (opcode_has_operand_error_p (opcode))
    {
      operand_error_record* record = operand_error_report.head;
      gas_assert (record != NULL && operand_error_report.tail != NULL);
      operand_error_report.head = record->next;
      record->next = free_opnd_error_record_nodes;
      free_opnd_error_record_nodes = record;
      if (operand_error_report.head == NULL)
	{
	  gas_assert (operand_error_report.tail == record);
	  operand_error_report.tail = NULL;
	}
    }
}

/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if multiple matches are found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = -1;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes have far fewer patterns in the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  if (i != 0 && idx == -1)
	    /* If nothing has been matched, return the 1st sequence.  */
	    idx = 0;
	  break;
	}

      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}

/* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
   corresponding operands in *INSTR.  */

static inline void
assign_qualifier_sequence (aarch64_inst *instr,
			   const aarch64_opnd_qualifier_t *qualifiers)
{
  int i = 0;
  int num_opnds = aarch64_num_of_operands (instr->opcode);
  gas_assert (num_opnds);
  for (i = 0; i < num_opnds; ++i, ++qualifiers)
    instr->operands[i].qualifier = *qualifiers;
}

/* Print operands for diagnostic purposes.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];

      /* We regard the opcode operand info as authoritative, but we also
	 look into inst->operands to support printing of an optional
	 operand.
	 The two operand codes should be the same in all cases, except
	 when the operand is optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ",");

      /* Append the operand string.  */
      strcat (buf, str);
    }
}

/* Send a string to stderr as information.  */

static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  file = as_where (&line);
  if (file)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}

/* Output one operand error record.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is one; otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    as_bad (_("%s -- `%s'"), detail->error, str);
	  else
	    as_bad (_("%s at operand %d -- `%s'"),
		    detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
		aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      as_bad (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when other errors co-exist with this error, the
	     'corrected' instruction may still be incorrect, e.g.  given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes have far fewer patterns in the list.
		 The first NIL qualifier indicates the end of the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonic name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      if (detail->data[0] != detail->data[1])
	as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
		detail->error ? detail->error : _("immediate value"),
		detail->data[0], detail->data[1], idx + 1, str);
      else
	as_bad (_("%s expected to be %d at operand %d -- `%s'"),
		detail->error ? detail->error : _("immediate value"),
		detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      if (detail->data[0] == 1)
	as_bad (_("invalid number of registers in the list; "
		  "only 1 register is expected at operand %d -- `%s'"),
		idx + 1, str);
      else
	as_bad (_("invalid number of registers in the list; "
		  "%d registers are expected at operand %d -- `%s'"),
	      detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      as_bad (_("immediate value should be a multiple of "
		"%d at operand %d -- `%s'"),
	      detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}

/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information has
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.  */

static void
output_operand_error_report (char *str)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      DEBUG_TRACE ("single opcode entry with error kind: %s",
		   operand_mismatch_kind_names[head->detail.kind]);
      output_operand_error_record (head, str);
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind))
	kind = curr->detail.kind;
    }
  gas_assert (kind != AARCH64_OPDE_NIL);

  /* Pick up one of the errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      if (curr->detail.kind != kind)
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}

/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *where = (unsigned char *) buf;
  where[0] = insn;
  where[1] = insn >> 8;
  where[2] = insn >> 16;
  where[3] = insn >> 24;
}

static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *where = (unsigned char *) buf;
  uint32_t result;
  result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
  return result;
}

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix(). */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}

/* Link together opcodes of the same name.  */

struct templates
{
  aarch64_opcode *opcode;
  struct templates *next;
};

typedef struct templates templates;

static templates *
lookup_mnemonic (const char *start, int len)
{
  templates *templ = NULL;

  templ = hash_find_n (aarch64_ops_hsh, start, len);
  return templ;
}

/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic. */
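/* For example, given "b.eq", the two characters after the dot are looked up
   in aarch64_cond_hsh, inst.cond is set accordingly, and the opcode table is
   then probed with the ".c"-suffixed name "b.c".  */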

static templates *
opcode_lookup (char **str)
{
  char *end, *base;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.')
      break;

  if (end == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (end[0] == '.')
    {
      cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end + 3;
	}
      else
	{
	  *str = end;
	  return 0;
	}
    }
  else
    *str = end;

  len = end - base;

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* Append ".c" to the mnemonic if conditional.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}

/* Internal helper routine converting a vector neon_type_el structure
   *VECTYPE to a corresponding operand qualifier.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct neon_type_el *vectype)
{
  /* Element size in bytes indexed by neon_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_8B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
  };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & NTA_HASINDEX)
    /* Vector element register.  */
    return AARCH64_OPND_QLF_S_B + vectype->type;
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
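      /* For example, ".4s" gives NT_s with width 4: reg_size is 16, the
	 shift is 2, and the offset is ele_base[NT_s] + (4 >> 2), i.e. one
	 step beyond AARCH64_OPND_QLF_V_2S (which is AARCH64_OPND_QLF_V_4S).  */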
      shift = 0;
      if (vectype->type == NT_b)
	shift = 4;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_8B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}

/* Process an optional operand that has been omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
      operand->reglane.regno = default_value;
      break;

    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
      operand->imm.value = default_value;
      break;

    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    default:
      break;
    }
}

/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.  */
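/* For example, BFD_RELOC_AARCH64_MOVW_G1 selects an implicit LSL #16 for the
   16-bit immediate, while the G0 variants leave the shift at 0 and the G2/G3
   variants (64-bit registers only) select LSL #32 and LSL #48.  */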

static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}

/* A primitive log2 calculator.  */
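/* For example, get_logsz (4) == 2 and get_logsz (16) == 4; any size that is
   not a power of two trips the assertion below.  */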

static inline unsigned int
get_logsz (unsigned int size)
{
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  if (size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}

/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.  */
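/* For example, a load/store of an X register transfers 8 bytes, so logsz is
   3 and BFD_RELOC_AARCH64_LDST_LO12 is narrowed to
   BFD_RELOC_AARCH64_LDST64_LO12.  */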

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in the same
     order as in the reloc_ldst_lo12 array above, because the array index
     calculation below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}

/* Check whether a register list REGINFO is valid.  The registers must be
   numbered in increasing order (modulo 32), in increments of one or two.

   If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
   increments of two.

   Return FALSE if such a register list is invalid, otherwise return TRUE.  */
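/* REGINFO packs the number of registers minus one in bits [1:0] and the
   register numbers in successive 5-bit fields starting at bit 2; e.g. the
   three-register list starting at register 2 is encoded as
   (4 << 12) | (3 << 7) | (2 << 2) | 2.  */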

static bfd_boolean
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t i, nb_regs, prev_regno, incr;

  nb_regs = 1 + (reginfo & 0x3);
  reginfo >>= 2;
  prev_regno = reginfo & 0x1f;
  incr = accept_alternate ? 2 : 1;

  for (i = 1; i < nb_regs; ++i)
    {
      uint32_t curr_regno;
      reginfo >>= 5;
      curr_regno = reginfo & 0x1f;
      if (curr_regno != ((prev_regno + incr) & 0x1f))
	return FALSE;
      prev_regno = curr_regno;
    }

  return TRUE;
}

/* Generic instruction operand parser.	This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns TRUE or FALSE depending on whether the
   specified grammar matched.  */

static bfd_boolean
parse_operands (char *str, const aarch64_opcode *opcode)
{
  int i;
  char *backtrack_pos = 0;
  const enum aarch64_opnd *operands = opcode->operands;

  clear_error ();
  skip_whitespace (str);

  for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
    {
      int64_t val;
      int isreg32, isregzero;
      int comma_skipped_p = 0;
      aarch64_reg_type rtype;
      struct neon_type_el vectype;
      aarch64_opnd_info *info = &inst.base.operands[i];

      DEBUG_TRACE ("parse operand %d", i);

      /* Assign the operand code.  */
      info->type = operands[i];

      if (optional_operand_p (opcode, i))
	{
	  /* Remember where we are in case we need to backtrack.  */
	  gas_assert (!backtrack_pos);
	  backtrack_pos = str;
	}

      /* Expect a comma between operands; the backtrack mechanism will take
	 care of cases of omitted optional operands.  */
      if (i > 0 && ! skip_past_char (&str, ','))
	{
	  set_syntax_error (_("comma expected between operands"));
	  goto failure;
	}
      else
	comma_skipped_p = 1;

      switch (operands[i])
	{
	case AARCH64_OPND_Rd:
	case AARCH64_OPND_Rn:
	case AARCH64_OPND_Rm:
	case AARCH64_OPND_Rt:
	case AARCH64_OPND_Rt2:
	case AARCH64_OPND_Rs:
	case AARCH64_OPND_Ra:
	case AARCH64_OPND_Rt_SYS:
	case AARCH64_OPND_PAIRREG:
	  po_int_reg_or_fail (1, 0);
	  break;

	case AARCH64_OPND_Rd_SP:
	case AARCH64_OPND_Rn_SP:
	  po_int_reg_or_fail (0, 1);
	  break;

	case AARCH64_OPND_Rm_EXT:
	case AARCH64_OPND_Rm_SFT:
	  po_misc_or_fail (parse_shifter_operand
			   (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
					 ? SHIFTED_ARITH_IMM
					 : SHIFTED_LOGIC_IMM)));
	  if (!info->shifter.operator_present)
	    {
	      /* Default to LSL if not present.  Libopcodes prefers shifter
		 kind to be explicit.  */
	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
	      info->shifter.kind = AARCH64_MOD_LSL;
	      /* For Rm_EXT, libopcodes will carry out further check on whether
		 or not stack pointer is used in the instruction (Recall that
		 "the extend operator is not optional unless at least one of
		 "Rd" or "Rn" is '11111' (i.e. WSP)").  */
	    }
	  break;

	case AARCH64_OPND_Fd:
	case AARCH64_OPND_Fn:
	case AARCH64_OPND_Fm:
	case AARCH64_OPND_Fa:
	case AARCH64_OPND_Ft:
	case AARCH64_OPND_Ft2:
	case AARCH64_OPND_Sd:
	case AARCH64_OPND_Sn:
	case AARCH64_OPND_Sm:
	  val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
	  if (val == PARSE_FAIL)
	    {
	      first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
	      goto failure;
	    }
	  gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);

	  info->reg.regno = val;
	  info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
	  break;

	case AARCH64_OPND_Vd:
	case AARCH64_OPND_Vn:
	case AARCH64_OPND_Vm:
	  val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
	  if (val == PARSE_FAIL)
	    {
	      first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
	      goto failure;
	    }
	  if (vectype.defined & NTA_HASINDEX)
	    goto failure;

	  info->reg.regno = val;
	  info->qualifier = vectype_to_qualifier (&vectype);
	  if (info->qualifier == AARCH64_OPND_QLF_NIL)
	    goto failure;
	  break;

	case AARCH64_OPND_VdD1:
	case AARCH64_OPND_VnD1:
	  val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
	  if (val == PARSE_FAIL)
	    {
	      set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
	      goto failure;
	    }
	  if (vectype.type != NT_d || vectype.index != 1)
	    {
	      set_fatal_syntax_error
		(_("the top half of a 128-bit FP/SIMD register is expected"));
	      goto failure;
	    }
	  info->reg.regno = val;
	  /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
	     here; it is correct for the purpose of encoding/decoding since
	     only the register number is explicitly encoded in the related
	     instructions, although this appears a bit hacky.  */
	  info->qualifier = AARCH64_OPND_QLF_S_D;
	  break;
5137 
5138 	case AARCH64_OPND_Ed:
5139 	case AARCH64_OPND_En:
5140 	case AARCH64_OPND_Em:
5141 	  val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5142 	  if (val == PARSE_FAIL)
5143 	    {
5144 	      first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5145 	      goto failure;
5146 	    }
5147 	  if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5148 	    goto failure;
5149 
5150 	  info->reglane.regno = val;
5151 	  info->reglane.index = vectype.index;
5152 	  info->qualifier = vectype_to_qualifier (&vectype);
5153 	  if (info->qualifier == AARCH64_OPND_QLF_NIL)
5154 	    goto failure;
5155 	  break;
5156 
5157 	case AARCH64_OPND_LVn:
5158 	case AARCH64_OPND_LVt:
5159 	case AARCH64_OPND_LVt_AL:
5160 	case AARCH64_OPND_LEt:
5161 	  if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
5162 	    goto failure;
5163 	  if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5164 	    {
5165 	      set_fatal_syntax_error (_("invalid register list"));
5166 	      goto failure;
5167 	    }
5168 	  info->reglist.first_regno = (val >> 2) & 0x1f;
5169 	  info->reglist.num_regs = (val & 0x3) + 1;
5170 	  if (operands[i] == AARCH64_OPND_LEt)
5171 	    {
5172 	      if (!(vectype.defined & NTA_HASINDEX))
5173 		goto failure;
5174 	      info->reglist.has_index = 1;
5175 	      info->reglist.index = vectype.index;
5176 	    }
5177 	  else if (!(vectype.defined & NTA_HASTYPE))
5178 	    goto failure;
5179 	  info->qualifier = vectype_to_qualifier (&vectype);
5180 	  if (info->qualifier == AARCH64_OPND_QLF_NIL)
5181 	    goto failure;
5182 	  break;
5183 
5184 	case AARCH64_OPND_Cn:
5185 	case AARCH64_OPND_Cm:
5186 	  po_reg_or_fail (REG_TYPE_CN);
5187 	  if (val > 15)
5188 	    {
5189 	      set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
5190 	      goto failure;
5191 	    }
5192 	  inst.base.operands[i].reg.regno = val;
5193 	  break;
5194 
5195 	case AARCH64_OPND_SHLL_IMM:
5196 	case AARCH64_OPND_IMM_VLSR:
5197 	  po_imm_or_fail (1, 64);
5198 	  info->imm.value = val;
5199 	  break;
5200 
5201 	case AARCH64_OPND_CCMP_IMM:
5202 	case AARCH64_OPND_FBITS:
5203 	case AARCH64_OPND_UIMM4:
5204 	case AARCH64_OPND_UIMM3_OP1:
5205 	case AARCH64_OPND_UIMM3_OP2:
5206 	case AARCH64_OPND_IMM_VLSL:
5207 	case AARCH64_OPND_IMM:
5208 	case AARCH64_OPND_WIDTH:
5209 	  po_imm_nc_or_fail ();
5210 	  info->imm.value = val;
5211 	  break;
5212 
5213 	case AARCH64_OPND_UIMM7:
5214 	  po_imm_or_fail (0, 127);
5215 	  info->imm.value = val;
5216 	  break;
5217 
5218 	case AARCH64_OPND_IDX:
5219 	case AARCH64_OPND_BIT_NUM:
5220 	case AARCH64_OPND_IMMR:
5221 	case AARCH64_OPND_IMMS:
5222 	  po_imm_or_fail (0, 63);
5223 	  info->imm.value = val;
5224 	  break;
5225 
5226 	case AARCH64_OPND_IMM0:
5227 	  po_imm_nc_or_fail ();
5228 	  if (val != 0)
5229 	    {
5230 	      set_fatal_syntax_error (_("immediate zero expected"));
5231 	      goto failure;
5232 	    }
5233 	  info->imm.value = 0;
5234 	  break;
5235 
5236 	case AARCH64_OPND_FPIMM0:
5237 	  {
5238 	    int qfloat;
5239 	    bfd_boolean res1 = FALSE, res2 = FALSE;
5240 	    /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5241 	       it is probably not worth the effort to support it.  */
5242 	    if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
5243 		&& !(res2 = parse_constant_immediate (&str, &val)))
5244 	      goto failure;
5245 	    if ((res1 && qfloat == 0) || (res2 && val == 0))
5246 	      {
5247 		info->imm.value = 0;
5248 		info->imm.is_fp = 1;
5249 		break;
5250 	      }
5251 	    set_fatal_syntax_error (_("immediate zero expected"));
5252 	    goto failure;
5253 	  }
5254 
5255 	case AARCH64_OPND_IMM_MOV:
5256 	  {
5257 	    char *saved = str;
5258 	    if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5259 		reg_name_p (str, REG_TYPE_VN))
5260 	      goto failure;
5261 	    str = saved;
5262 	    po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5263 						GE_OPT_PREFIX, 1));
5264 	    /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5265 	       later.  fix_mov_imm_insn will try to determine a machine
5266 	       instruction (MOVZ, MOVN or ORR) for it and will issue an error
5267 	       message if the immediate cannot be moved by a single
5268 	       instruction.  */
5269 	    aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5270 	    inst.base.operands[i].skip = 1;
5271 	  }
5272 	  break;
5273 
5274 	case AARCH64_OPND_SIMD_IMM:
5275 	case AARCH64_OPND_SIMD_IMM_SFT:
5276 	  if (! parse_big_immediate (&str, &val))
5277 	    goto failure;
5278 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5279 					      /* addr_off_p */ 0,
5280 					      /* need_libopcodes_p */ 1,
5281 					      /* skip_p */ 1);
5282 	  /* Parse shift.
5283 	     N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5284 	     shift, we don't check it here; we leave the checking to
5285 	     the libopcodes (operand_general_constraint_met_p).  By
5286 	     doing this, we achieve better diagnostics.  */
5287 	  if (skip_past_comma (&str)
5288 	      && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5289 	    goto failure;
5290 	  if (!info->shifter.operator_present
5291 	      && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5292 	    {
5293 	      /* Default to LSL if not present.  Libopcodes prefers shifter
5294 		 kind to be explicit.  */
5295 	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5296 	      info->shifter.kind = AARCH64_MOD_LSL;
5297 	    }
5298 	  break;
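	  /* For illustration (hedged; legality is ultimately checked by
	     libopcodes), typical forms parsed here include:

	       movi	v0.4s, #0x42		  // shift omitted, LSL implied
	       movi	v0.4s, #0x42, lsl #8	  // explicit LSL shift
	       movi	v0.2d, #0xff00ff00ff00ff00  */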
5299 
5300 	case AARCH64_OPND_FPIMM:
5301 	case AARCH64_OPND_SIMD_FPIMM:
5302 	  {
5303 	    int qfloat;
5304 	    bfd_boolean dp_p
5305 	      = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
5306 		 == 8);
5307 	    if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
5308 	      goto failure;
5309 	    if (qfloat == 0)
5310 	      {
5311 		set_fatal_syntax_error (_("invalid floating-point constant"));
5312 		goto failure;
5313 	      }
5314 	    inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5315 	    inst.base.operands[i].imm.is_fp = 1;
5316 	  }
5317 	  break;
5318 
5319 	case AARCH64_OPND_LIMM:
5320 	  po_misc_or_fail (parse_shifter_operand (&str, info,
5321 						  SHIFTED_LOGIC_IMM));
5322 	  if (info->shifter.operator_present)
5323 	    {
5324 	      set_fatal_syntax_error
5325 		(_("shift not allowed for bitmask immediate"));
5326 	      goto failure;
5327 	    }
5328 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5329 					      /* addr_off_p */ 0,
5330 					      /* need_libopcodes_p */ 1,
5331 					      /* skip_p */ 1);
5332 	  break;
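	  /* For illustration (a hedged sketch of the logical-immediate forms):

	       and	x0, x1, #0xff0		  // valid bitmask immediate
	       orr	x0, x1, #0x5555555555555555
	       and	x0, x1, #0xff0, lsl #4	  // rejected: shift not allowed

	     Whether a constant is actually encodable as a bitmask immediate is
	     left to libopcodes via the fixup recorded above.  */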
5333 
5334 	case AARCH64_OPND_AIMM:
5335 	  if (opcode->op == OP_ADD)
5336 	    /* ADD may have relocation types.  */
5337 	    po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5338 							  SHIFTED_ARITH_IMM));
5339 	  else
5340 	    po_misc_or_fail (parse_shifter_operand (&str, info,
5341 						    SHIFTED_ARITH_IMM));
5342 	  switch (inst.reloc.type)
5343 	    {
5344 	    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5345 	      info->shifter.amount = 12;
5346 	      break;
5347 	    case BFD_RELOC_UNUSED:
5348 	      aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5349 	      if (info->shifter.kind != AARCH64_MOD_NONE)
5350 		inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5351 	      inst.reloc.pc_rel = 0;
5352 	      break;
5353 	    default:
5354 	      break;
5355 	    }
5356 	  info->imm.value = 0;
5357 	  if (!info->shifter.operator_present)
5358 	    {
5359 	      /* Default to LSL if not present.  Libopcodes prefers shifter
5360 		 kind to be explicit.  */
5361 	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5362 	      info->shifter.kind = AARCH64_MOD_LSL;
5363 	    }
5364 	  break;
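	  /* For illustration (a hedged sketch of the arithmetic-immediate forms):

	       add	x0, x1, #4095		  // 12-bit immediate, implicit LSL #0
	       add	x0, x1, #1, lsl #12	  // explicit LSL #12
	       sub	sp, sp, #16

	     Out-of-range values coming from symbolic expressions are diagnosed
	     later, in fix_insn, once the value is known.  */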
5365 
5366 	case AARCH64_OPND_HALF:
5367 	    {
5368 	      /* #<imm16> or relocation.  */
5369 	      int internal_fixup_p;
5370 	      po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5371 	      if (internal_fixup_p)
5372 		aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5373 	      skip_whitespace (str);
5374 	      if (skip_past_comma (&str))
5375 		{
5376 		  /* {, LSL #<shift>}  */
5377 		  if (! aarch64_gas_internal_fixup_p ())
5378 		    {
5379 		      set_fatal_syntax_error (_("can't mix relocation modifier "
5380 						"with explicit shift"));
5381 		      goto failure;
5382 		    }
5383 		  po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5384 		}
5385 	      else
5386 		inst.base.operands[i].shifter.amount = 0;
5387 	      inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5388 	      inst.base.operands[i].imm.value = 0;
5389 	      if (! process_movw_reloc_info ())
5390 		goto failure;
5391 	    }
5392 	  break;
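	  /* For illustration (hedged examples of the #<imm16> forms):

	       movz	x0, #0xbeef		  // imm16, LSL #0
	       movz	x0, #0xbeef, lsl #16	  // explicit shift
	       movz	x0, #:abs_g1:sym	  // relocation modifier

	     Mixing a relocation modifier with an explicit shift, e.g.
	     "movz x0, #:abs_g1:sym, lsl #16", is rejected above.  */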
5393 
5394 	case AARCH64_OPND_EXCEPTION:
5395 	  po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
5396 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5397 					      /* addr_off_p */ 0,
5398 					      /* need_libopcodes_p */ 0,
5399 					      /* skip_p */ 1);
5400 	  break;
5401 
5402 	case AARCH64_OPND_NZCV:
5403 	  {
5404 	    const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5405 	    if (nzcv != NULL)
5406 	      {
5407 		str += 4;
5408 		info->imm.value = nzcv->value;
5409 		break;
5410 	      }
5411 	    po_imm_or_fail (0, 15);
5412 	    info->imm.value = val;
5413 	  }
5414 	  break;
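	  /* For illustration: the flags operand of CCMP/CCMN may be given as a
	     4-bit immediate, e.g. "ccmp x0, x1, #8, eq", or by naming the set
	     flags as in the nzcv_names table further down in this file, where
	     for instance "NzCv" corresponds to the value 0b1010.  */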
5415 
5416 	case AARCH64_OPND_COND:
5417 	case AARCH64_OPND_COND1:
5418 	  info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5419 	  str += 2;
5420 	  if (info->cond == NULL)
5421 	    {
5422 	      set_syntax_error (_("invalid condition"));
5423 	      goto failure;
5424 	    }
5425 	  else if (operands[i] == AARCH64_OPND_COND1
5426 		   && (info->cond->value & 0xe) == 0xe)
5427 	    {
5428 	      /* Do not allow AL or NV.  */
5429 	      set_default_error ();
5430 	      goto failure;
5431 	    }
5432 	  break;
5433 
5434 	case AARCH64_OPND_ADDR_ADRP:
5435 	  po_misc_or_fail (parse_adrp (&str));
5436 	  /* Clear the value as operand needs to be relocated.  */
5437 	  info->imm.value = 0;
5438 	  break;
5439 
5440 	case AARCH64_OPND_ADDR_PCREL14:
5441 	case AARCH64_OPND_ADDR_PCREL19:
5442 	case AARCH64_OPND_ADDR_PCREL21:
5443 	case AARCH64_OPND_ADDR_PCREL26:
5444 	  po_misc_or_fail (parse_address_reloc (&str, info));
5445 	  if (!info->addr.pcrel)
5446 	    {
5447 	      set_syntax_error (_("invalid pc-relative address"));
5448 	      goto failure;
5449 	    }
5450 	  if (inst.gen_lit_pool
5451 	      && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5452 	    {
5453 	      /* Only permit "=value" in the literal load instructions.
5454 		 The literal will be generated by programmer_friendly_fixup.  */
5455 	      set_syntax_error (_("invalid use of \"=immediate\""));
5456 	      goto failure;
5457 	    }
5458 	  if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5459 	    {
5460 	      set_syntax_error (_("unrecognized relocation suffix"));
5461 	      goto failure;
5462 	    }
5463 	  if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5464 	    {
5465 	      info->imm.value = inst.reloc.exp.X_add_number;
5466 	      inst.reloc.type = BFD_RELOC_UNUSED;
5467 	    }
5468 	  else
5469 	    {
5470 	      info->imm.value = 0;
5471 	      if (inst.reloc.type == BFD_RELOC_UNUSED)
5472 		switch (opcode->iclass)
5473 		  {
5474 		  case compbranch:
5475 		  case condbranch:
5476 		    /* e.g. CBZ or B.COND  */
5477 		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5478 		    inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5479 		    break;
5480 		  case testbranch:
5481 		    /* e.g. TBZ  */
5482 		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5483 		    inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5484 		    break;
5485 		  case branch_imm:
5486 		    /* e.g. B or BL  */
5487 		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5488 		    inst.reloc.type =
5489 		      (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5490 			 : BFD_RELOC_AARCH64_JUMP26;
5491 		    break;
5492 		  case loadlit:
5493 		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5494 		    inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5495 		    break;
5496 		  case pcreladdr:
5497 		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5498 		    inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5499 		    break;
5500 		  default:
5501 		    gas_assert (0);
5502 		    abort ();
5503 		  }
5504 	      inst.reloc.pc_rel = 1;
5505 	    }
5506 	  break;
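	  /* For illustration, the default relocations chosen above correspond
	     to source forms such as:

	       cbz	x0, label	  // BFD_RELOC_AARCH64_BRANCH19
	       b.eq	label		  // BFD_RELOC_AARCH64_BRANCH19
	       tbz	x0, #3, label	  // BFD_RELOC_AARCH64_TSTBR14
	       b	label		  // BFD_RELOC_AARCH64_JUMP26
	       bl	func		  // BFD_RELOC_AARCH64_CALL26
	       ldr	x0, label	  // BFD_RELOC_AARCH64_LD_LO19_PCREL
	       adr	x0, label	  // BFD_RELOC_AARCH64_ADR_LO21_PCREL  */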
5507 
5508 	case AARCH64_OPND_ADDR_SIMPLE:
5509 	case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5510 	  /* [<Xn|SP>{, #<simm>}]  */
5511 	  po_char_or_fail ('[');
5512 	  po_reg_or_fail (REG_TYPE_R64_SP);
5513 	  /* Accept optional ", #0".  */
5514 	  if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5515 	      && skip_past_char (&str, ','))
5516 	    {
5517 	      skip_past_char (&str, '#');
5518 	      if (! skip_past_char (&str, '0'))
5519 		{
5520 		  set_fatal_syntax_error
5521 		    (_("the optional immediate offset can only be 0"));
5522 		  goto failure;
5523 		}
5524 	    }
5525 	  po_char_or_fail (']');
5526 	  info->addr.base_regno = val;
5527 	  break;
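	  /* For illustration (hedged; which opcodes use these operand types is
	     defined by the opcode table):

	       ldxr	w0, [x1]	  // simple base-register address
	       ldxr	w0, [x1, #0]	  // optional ", #0" accepted
	       ld1	{v0.16b}, [x0]	  // SIMD form; no offset accepted  */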
5528 
5529 	case AARCH64_OPND_ADDR_REGOFF:
5530 	  /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}]  */
5531 	  po_misc_or_fail (parse_address (&str, info, 0));
5532 	  if (info->addr.pcrel || !info->addr.offset.is_reg
5533 	      || !info->addr.preind || info->addr.postind
5534 	      || info->addr.writeback)
5535 	    {
5536 	      set_syntax_error (_("invalid addressing mode"));
5537 	      goto failure;
5538 	    }
5539 	  if (!info->shifter.operator_present)
5540 	    {
5541 	      /* Default to LSL if not present.  Libopcodes prefers shifter
5542 		 kind to be explicit.  */
5543 	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5544 	      info->shifter.kind = AARCH64_MOD_LSL;
5545 	    }
5546 	  /* Qualifier to be deduced by libopcodes.  */
5547 	  break;
5548 
5549 	case AARCH64_OPND_ADDR_SIMM7:
5550 	  po_misc_or_fail (parse_address (&str, info, 0));
5551 	  if (info->addr.pcrel || info->addr.offset.is_reg
5552 	      || (!info->addr.preind && !info->addr.postind))
5553 	    {
5554 	      set_syntax_error (_("invalid addressing mode"));
5555 	      goto failure;
5556 	    }
5557 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5558 					      /* addr_off_p */ 1,
5559 					      /* need_libopcodes_p */ 1,
5560 					      /* skip_p */ 0);
5561 	  break;
5562 
5563 	case AARCH64_OPND_ADDR_SIMM9:
5564 	case AARCH64_OPND_ADDR_SIMM9_2:
5565 	  po_misc_or_fail (parse_address_reloc (&str, info));
5566 	  if (info->addr.pcrel || info->addr.offset.is_reg
5567 	      || (!info->addr.preind && !info->addr.postind)
5568 	      || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5569 		  && info->addr.writeback))
5570 	    {
5571 	      set_syntax_error (_("invalid addressing mode"));
5572 	      goto failure;
5573 	    }
5574 	  if (inst.reloc.type != BFD_RELOC_UNUSED)
5575 	    {
5576 	      set_syntax_error (_("relocation not allowed"));
5577 	      goto failure;
5578 	    }
5579 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5580 					      /* addr_off_p */ 1,
5581 					      /* need_libopcodes_p */ 1,
5582 					      /* skip_p */ 0);
5583 	  break;
5584 
5585 	case AARCH64_OPND_ADDR_UIMM12:
5586 	  po_misc_or_fail (parse_address_reloc (&str, info));
5587 	  if (info->addr.pcrel || info->addr.offset.is_reg
5588 	      || !info->addr.preind || info->addr.writeback)
5589 	    {
5590 	      set_syntax_error (_("invalid addressing mode"));
5591 	      goto failure;
5592 	    }
5593 	  if (inst.reloc.type == BFD_RELOC_UNUSED)
5594 	    aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5595 	  else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5596 		   || (inst.reloc.type
5597 		       == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
5598 		   || (inst.reloc.type
5599 		       == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC))
5600 	    inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5601 	  /* Leave qualifier to be determined by libopcodes.  */
5602 	  break;
5603 
5604 	case AARCH64_OPND_SIMD_ADDR_POST:
5605 	  /* [<Xn|SP>], <Xm|#<amount>>  */
5606 	  po_misc_or_fail (parse_address (&str, info, 1));
5607 	  if (!info->addr.postind || !info->addr.writeback)
5608 	    {
5609 	      set_syntax_error (_("invalid addressing mode"));
5610 	      goto failure;
5611 	    }
5612 	  if (!info->addr.offset.is_reg)
5613 	    {
5614 	      if (inst.reloc.exp.X_op == O_constant)
5615 		info->addr.offset.imm = inst.reloc.exp.X_add_number;
5616 	      else
5617 		{
5618 		  set_fatal_syntax_error
5619 		    (_("writeback value should be an immediate constant"));
5620 		  goto failure;
5621 		}
5622 	    }
5623 	  /* No qualifier.  */
5624 	  break;
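	  /* For illustration of the post-indexed SIMD forms parsed above:

	       ld1	{v0.16b}, [x0], #16	  // immediate writeback
	       ld1	{v0.16b}, [x0], x1	  // register writeback  */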
5625 
5626 	case AARCH64_OPND_SYSREG:
5627 	  if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
5628 	      == PARSE_FAIL)
5629 	    {
5630 	      set_syntax_error (_("unknown or missing system register name"));
5631 	      goto failure;
5632 	    }
5633 	  inst.base.operands[i].sysreg = val;
5634 	  break;
5635 
5636 	case AARCH64_OPND_PSTATEFIELD:
5637 	  if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
5638 	      == PARSE_FAIL)
5639 	    {
5640 	      set_syntax_error (_("unknown or missing PSTATE field name"));
5641 	      goto failure;
5642 	    }
5643 	  inst.base.operands[i].pstatefield = val;
5644 	  break;
5645 
5646 	case AARCH64_OPND_SYSREG_IC:
5647 	  inst.base.operands[i].sysins_op =
5648 	    parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5649 	  goto sys_reg_ins;
5650 	case AARCH64_OPND_SYSREG_DC:
5651 	  inst.base.operands[i].sysins_op =
5652 	    parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5653 	  goto sys_reg_ins;
5654 	case AARCH64_OPND_SYSREG_AT:
5655 	  inst.base.operands[i].sysins_op =
5656 	    parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5657 	  goto sys_reg_ins;
5658 	case AARCH64_OPND_SYSREG_TLBI:
5659 	  inst.base.operands[i].sysins_op =
5660 	    parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5661 sys_reg_ins:
5662 	  if (inst.base.operands[i].sysins_op == NULL)
5663 	    {
5664 	      set_fatal_syntax_error ( _("unknown or missing operation name"));
5665 	      goto failure;
5666 	    }
5667 	  break;
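	  /* For illustration, typical system instruction operands handled by
	     the cases above:

	       ic	ialluis
	       dc	zva, x0
	       at	s1e1r, x0
	       tlbi	vmalle1is  */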
5668 
5669 	case AARCH64_OPND_BARRIER:
5670 	case AARCH64_OPND_BARRIER_ISB:
5671 	  val = parse_barrier (&str);
5672 	  if (val != PARSE_FAIL
5673 	      && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5674 	    {
5675 	      /* ISB only accepts the option name 'sy'.  */
5676 	      set_syntax_error
5677 		(_("the specified option is not accepted in ISB"));
5678 	      /* Turn off backtrack as this optional operand is present.  */
5679 	      backtrack_pos = 0;
5680 	      goto failure;
5681 	    }
5682 	  /* This is an extension to accept a 0..15 immediate.  */
5683 	  if (val == PARSE_FAIL)
5684 	    po_imm_or_fail (0, 15);
5685 	  info->barrier = aarch64_barrier_options + val;
5686 	  break;
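	  /* For illustration:

	       dmb	ish	  // named barrier option
	       dsb	sy
	       isb		  // option omitted, defaults to sy
	       isb	sy	  // the only named option ISB accepts
	       dmb	#11	  // extension: raw 0..15 CRm value  */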
5687 
5688 	case AARCH64_OPND_PRFOP:
5689 	  val = parse_pldop (&str);
5690 	  /* This is an extension to accept a 0..31 immediate.  */
5691 	  if (val == PARSE_FAIL)
5692 	    po_imm_or_fail (0, 31);
5693 	  inst.base.operands[i].prfop = aarch64_prfops + val;
5694 	  break;
5695 
5696 	case AARCH64_OPND_BARRIER_PSB:
5697 	  val = parse_barrier_psb (&str, &(info->hint_option));
5698 	  if (val == PARSE_FAIL)
5699 	    goto failure;
5700 	  break;
5701 
5702 	default:
5703 	  as_fatal (_("unhandled operand code %d"), operands[i]);
5704 	}
5705 
5706       /* If we get here, this operand was successfully parsed.  */
5707       inst.base.operands[i].present = 1;
5708       continue;
5709 
5710 failure:
5711       /* The parse routine should already have set the error, but in case
5712 	 not, set a default one here.  */
5713       if (! error_p ())
5714 	set_default_error ();
5715 
5716       if (! backtrack_pos)
5717 	goto parse_operands_return;
5718 
5719       {
5720 	/* We reach here because this operand is marked as optional, and
5721 	   either no operand was supplied or the operand was supplied but it
5722 	   was syntactically incorrect.  In the latter case we report an
5723 	   error.  In the former case we perform a few more checks before
5724 	   dropping through to the code to insert the default operand.  */
5725 
5726 	char *tmp = backtrack_pos;
5727 	char endchar = END_OF_INSN;
5728 
5729 	if (i != (aarch64_num_of_operands (opcode) - 1))
5730 	  endchar = ',';
5731 	skip_past_char (&tmp, ',');
5732 
5733 	if (*tmp != endchar)
5734 	  /* The user has supplied an operand in the wrong format.  */
5735 	  goto parse_operands_return;
5736 
5737 	/* Make sure there is not a comma before the optional operand.
5738 	   For example the fifth operand of 'sys' is optional:
5739 
5740 	     sys #0,c0,c0,#0,  <--- wrong
5741 	     sys #0,c0,c0,#0   <--- correct.  */
5742 	if (comma_skipped_p && i && endchar == END_OF_INSN)
5743 	  {
5744 	    set_fatal_syntax_error
5745 	      (_("unexpected comma before the omitted optional operand"));
5746 	    goto parse_operands_return;
5747 	  }
5748       }
5749 
5750       /* Reaching here means we are dealing with an optional operand that is
5751 	 omitted from the assembly line.  */
5752       gas_assert (optional_operand_p (opcode, i));
5753       info->present = 0;
5754       process_omitted_operand (operands[i], opcode, i, info);
5755 
5756       /* Try again, skipping the optional operand at backtrack_pos.  */
5757       str = backtrack_pos;
5758       backtrack_pos = 0;
5759 
5760       /* Clear any error record after the omitted optional operand has been
5761 	 successfully handled.  */
5762       clear_error ();
5763     }
5764 
5765   /* Check if we have parsed all the operands.  */
5766   if (*str != '\0' && ! error_p ())
5767     {
5768       /* Set I to the index of the last present operand; this is
5769 	 for the purpose of diagnostics.  */
5770       for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5771 	;
5772       set_fatal_syntax_error
5773 	(_("unexpected characters following instruction"));
5774     }
5775 
5776 parse_operands_return:
5777 
5778   if (error_p ())
5779     {
5780       DEBUG_TRACE ("parsing FAIL: %s - %s",
5781 		   operand_mismatch_kind_names[get_error_kind ()],
5782 		   get_error_message ());
5783       /* Record the operand error properly; this is useful when there
5784 	 are multiple instruction templates for a mnemonic name, so that
5785 	 later on, we can select the error that most closely describes
5786 	 the problem.  */
5787       record_operand_error (opcode, i, get_error_kind (),
5788 			    get_error_message ());
5789       return FALSE;
5790     }
5791   else
5792     {
5793       DEBUG_TRACE ("parsing SUCCESS");
5794       return TRUE;
5795     }
5796 }
5797 
5798 /* Perform some fix-ups to provide programmer-friendly features while
5799    keeping libopcodes happy, i.e. libopcodes only accepts
5800    the preferred architectural syntax.
5801    Return FALSE if there is any failure; otherwise return TRUE.  */
5802 
5803 static bfd_boolean
5804 programmer_friendly_fixup (aarch64_instruction *instr)
5805 {
5806   aarch64_inst *base = &instr->base;
5807   const aarch64_opcode *opcode = base->opcode;
5808   enum aarch64_op op = opcode->op;
5809   aarch64_opnd_info *operands = base->operands;
5810 
5811   DEBUG_TRACE ("enter");
5812 
5813   switch (opcode->iclass)
5814     {
5815     case testbranch:
5816       /* TBNZ Xn|Wn, #uimm6, label
5817 	 Test and Branch Not Zero: conditionally jumps to label if bit number
5818 	 uimm6 in register Xn is not zero.  The bit number implies the width of
5819 	 the register, which may be written and should be disassembled as Wn if
5820 	 uimm is less than 32.  */
5821       if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5822 	{
5823 	  if (operands[1].imm.value >= 32)
5824 	    {
5825 	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5826 						 0, 31);
5827 	      return FALSE;
5828 	    }
5829 	  operands[0].qualifier = AARCH64_OPND_QLF_X;
5830 	}
5831       break;
5832     case loadlit:
5833       /* LDR Wt, label | =value
5834 	 As a convenience, assemblers will typically permit the notation
5835 	 "=value" in conjunction with the pc-relative literal load instructions
5836 	 to automatically place an immediate value or symbolic address in a
5837 	 nearby literal pool and generate a hidden label which references it.
5838 	 ISREG has been set to 0 in the case of =value.  */
5839       if (instr->gen_lit_pool
5840 	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5841 	{
5842 	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5843 	  if (op == OP_LDRSW_LIT)
5844 	    size = 4;
5845 	  if (instr->reloc.exp.X_op != O_constant
5846 	      && instr->reloc.exp.X_op != O_big
5847 	      && instr->reloc.exp.X_op != O_symbol)
5848 	    {
5849 	      record_operand_error (opcode, 1,
5850 				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5851 				    _("constant expression expected"));
5852 	      return FALSE;
5853 	    }
5854 	  if (! add_to_lit_pool (&instr->reloc.exp, size))
5855 	    {
5856 	      record_operand_error (opcode, 1,
5857 				    AARCH64_OPDE_OTHER_ERROR,
5858 				    _("literal pool insertion failed"));
5859 	      return FALSE;
5860 	    }
5861 	}
5862       break;
5863     case log_shift:
5864     case bitfield:
5865       /* UXT[BHW] Wd, Wn
5866 	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5867 	 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5868 	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5869 	 A programmer-friendly assembler should accept a destination Xd in
5870 	 place of Wd, however that is not the preferred form for disassembly.
5871 	 */
5872       if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5873 	  && operands[1].qualifier == AARCH64_OPND_QLF_W
5874 	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
5875 	operands[0].qualifier = AARCH64_OPND_QLF_W;
5876       break;
5877 
5878     case addsub_ext:
5879 	{
5880 	  /* In the 64-bit form, the final register operand is written as Wm
5881 	     for all but the (possibly omitted) UXTX/LSL and SXTX
5882 	     operators.
5883 	     As a programmer-friendly assembler, we accept e.g.
5884 	     ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5885 	     ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
5886 	  int idx = aarch64_operand_index (opcode->operands,
5887 					   AARCH64_OPND_Rm_EXT);
5888 	  gas_assert (idx == 1 || idx == 2);
5889 	  if (operands[0].qualifier == AARCH64_OPND_QLF_X
5890 	      && operands[idx].qualifier == AARCH64_OPND_QLF_X
5891 	      && operands[idx].shifter.kind != AARCH64_MOD_LSL
5892 	      && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5893 	      && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5894 	    operands[idx].qualifier = AARCH64_OPND_QLF_W;
5895 	}
5896       break;
5897 
5898     default:
5899       break;
5900     }
5901 
5902   DEBUG_TRACE ("exit with SUCCESS");
5903   return TRUE;
5904 }
5905 
5906 /* Check for loads and stores that will cause unpredictable behavior.  */
5907 
5908 static void
5909 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
5910 {
5911   aarch64_inst *base = &instr->base;
5912   const aarch64_opcode *opcode = base->opcode;
5913   const aarch64_opnd_info *opnds = base->operands;
5914   switch (opcode->iclass)
5915     {
5916     case ldst_pos:
5917     case ldst_imm9:
5918     case ldst_unscaled:
5919     case ldst_unpriv:
5920       /* Loading/storing the base register is unpredictable if writeback.  */
5921       if ((aarch64_get_operand_class (opnds[0].type)
5922 	   == AARCH64_OPND_CLASS_INT_REG)
5923 	  && opnds[0].reg.regno == opnds[1].addr.base_regno
5924 	  && opnds[1].addr.base_regno != REG_SP
5925 	  && opnds[1].addr.writeback)
5926 	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5927       break;
5928     case ldstpair_off:
5929     case ldstnapair_offs:
5930     case ldstpair_indexed:
5931       /* Loading/storing the base register is unpredictable if writeback.  */
5932       if ((aarch64_get_operand_class (opnds[0].type)
5933 	   == AARCH64_OPND_CLASS_INT_REG)
5934 	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
5935 	    || opnds[1].reg.regno == opnds[2].addr.base_regno)
5936 	  && opnds[2].addr.base_regno != REG_SP
5937 	  && opnds[2].addr.writeback)
5938 	    as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5939       /* Load operations must load different registers.  */
5940       if ((opcode->opcode & (1 << 22))
5941 	  && opnds[0].reg.regno == opnds[1].reg.regno)
5942 	    as_warn (_("unpredictable load of register pair -- `%s'"), str);
5943       break;
5944     default:
5945       break;
5946     }
5947 }
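/* For illustration, the kind of assembly the function above warns about
   (hedged examples):

     ldr	x0, [x0, #8]!		// writeback base == transfer register
     ldp	x0, x1, [x0], #16	// writeback base == first transfer register
     ldp	x0, x0, [x1]		// load pair into the same register  */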
5948 
5949 /* A wrapper function to interface with libopcodes for encoding, recording
5950    the error message if there is any.
5951 
5952    Return TRUE on success; otherwise return FALSE.  */
5953 
5954 static bfd_boolean
5955 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5956 	   aarch64_insn *code)
5957 {
5958   aarch64_operand_error error_info;
5959   error_info.kind = AARCH64_OPDE_NIL;
5960   if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5961     return TRUE;
5962   else
5963     {
5964       gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5965       record_operand_error_info (opcode, &error_info);
5966       return FALSE;
5967     }
5968 }
5969 
5970 #ifdef DEBUG_AARCH64
5971 static inline void
5972 dump_opcode_operands (const aarch64_opcode *opcode)
5973 {
5974   int i = 0;
5975   while (opcode->operands[i] != AARCH64_OPND_NIL)
5976     {
5977       aarch64_verbose ("\t\t opnd%d: %s", i,
5978 		       aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5979 		       ? aarch64_get_operand_name (opcode->operands[i])
5980 		       : aarch64_get_operand_desc (opcode->operands[i]));
5981       ++i;
5982     }
5983 }
5984 #endif /* DEBUG_AARCH64 */
5985 
5986 /* This is the guts of the machine-dependent assembler.  STR points to a
5987    machine dependent instruction.  This function is supposed to emit
5988    the frags/bytes it assembles to.  */
5989 
5990 void
5991 md_assemble (char *str)
5992 {
5993   char *p = str;
5994   templates *template;
5995   aarch64_opcode *opcode;
5996   aarch64_inst *inst_base;
5997   unsigned saved_cond;
5998 
5999   /* Align the previous label if needed.  */
6000   if (last_label_seen != NULL)
6001     {
6002       symbol_set_frag (last_label_seen, frag_now);
6003       S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
6004       S_SET_SEGMENT (last_label_seen, now_seg);
6005     }
6006 
6007   inst.reloc.type = BFD_RELOC_UNUSED;
6008 
6009   DEBUG_TRACE ("\n\n");
6010   DEBUG_TRACE ("==============================");
6011   DEBUG_TRACE ("Enter md_assemble with %s", str);
6012 
6013   template = opcode_lookup (&p);
6014   if (!template)
6015     {
6016       /* It wasn't an instruction, but it might be a register alias created
6017          by the "alias .req reg" directive.  */
6018       if (!create_register_alias (str, p))
6019 	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
6020 		str);
6021       return;
6022     }
6023 
6024   skip_whitespace (p);
6025   if (*p == ',')
6026     {
6027       as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
6028 	      get_mnemonic_name (str), str);
6029       return;
6030     }
6031 
6032   init_operand_error_report ();
6033 
6034   /* Sections are assumed to start aligned. In an executable section, there is no
6035      MAP_DATA symbol pending. So we only align the address during
6036      MAP_DATA --> MAP_INSN transition.
6037      For other sections, this is not guaranteed.  */
6038   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
6039   if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
6040     frag_align_code (2, 0);
6041 
6042   saved_cond = inst.cond;
6043   reset_aarch64_instruction (&inst);
6044   inst.cond = saved_cond;
6045 
6046   /* Iterate through all opcode entries with the same mnemonic name.  */
6047   do
6048     {
6049       opcode = template->opcode;
6050 
6051       DEBUG_TRACE ("opcode %s found", opcode->name);
6052 #ifdef DEBUG_AARCH64
6053       if (debug_dump)
6054 	dump_opcode_operands (opcode);
6055 #endif /* DEBUG_AARCH64 */
6056 
6057       mapping_state (MAP_INSN);
6058 
6059       inst_base = &inst.base;
6060       inst_base->opcode = opcode;
6061 
6062       /* Truly conditionally executed instructions, e.g. b.cond.  */
6063       if (opcode->flags & F_COND)
6064 	{
6065 	  gas_assert (inst.cond != COND_ALWAYS);
6066 	  inst_base->cond = get_cond_from_value (inst.cond);
6067 	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
6068 	}
6069       else if (inst.cond != COND_ALWAYS)
6070 	{
6071 	  /* It shouldn't arrive here, where the assembly looks like a
6072 	     conditional instruction but the found opcode is unconditional.  */
6073 	  gas_assert (0);
6074 	  continue;
6075 	}
6076 
6077       if (parse_operands (p, opcode)
6078 	  && programmer_friendly_fixup (&inst)
6079 	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
6080 	{
6081 	  /* Check that this instruction is supported for this CPU.  */
6082 	  if (!opcode->avariant
6083 	      || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
6084 	    {
6085 	      as_bad (_("selected processor does not support `%s'"), str);
6086 	      return;
6087 	    }
6088 
6089 	  warn_unpredictable_ldst (&inst, str);
6090 
6091 	  if (inst.reloc.type == BFD_RELOC_UNUSED
6092 	      || !inst.reloc.need_libopcodes_p)
6093 	    output_inst (NULL);
6094 	  else
6095 	    {
6096 	      /* If there is relocation generated for the instruction,
6097 	         store the instruction information for the future fix-up.  */
6098 	      struct aarch64_inst *copy;
6099 	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
6100 	      copy = XNEW (struct aarch64_inst);
6101 	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
6102 	      output_inst (copy);
6103 	    }
6104 	  return;
6105 	}
6106 
6107       template = template->next;
6108       if (template != NULL)
6109 	{
6110 	  reset_aarch64_instruction (&inst);
6111 	  inst.cond = saved_cond;
6112 	}
6113     }
6114   while (template != NULL);
6115 
6116   /* Issue the error messages if any.  */
6117   output_operand_error_report (str);
6118 }
6119 
6120 /* Various frobbings of labels and their addresses.  */
6121 
6122 void
6123 aarch64_start_line_hook (void)
6124 {
6125   last_label_seen = NULL;
6126 }
6127 
6128 void
6129 aarch64_frob_label (symbolS * sym)
6130 {
6131   last_label_seen = sym;
6132 
6133   dwarf2_emit_label (sym);
6134 }
6135 
6136 int
6137 aarch64_data_in_code (void)
6138 {
6139   if (!strncmp (input_line_pointer + 1, "data:", 5))
6140     {
6141       *input_line_pointer = '/';
6142       input_line_pointer += 5;
6143       *input_line_pointer = 0;
6144       return 1;
6145     }
6146 
6147   return 0;
6148 }
6149 
6150 char *
6151 aarch64_canonicalize_symbol_name (char *name)
6152 {
6153   int len;
6154 
6155   if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
6156     *(name + len - 5) = 0;
6157 
6158   return name;
6159 }
6160 
6161 /* Table of all register names defined by default.  The user can
6162    define additional names with .req.  Note that all register names
6163    should appear in both upper and lowercase variants.	Some registers
6164    also have mixed-case names.	*/
6165 
6166 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
6167 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
6168 #define REGSET31(p,t) \
6169   REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
6170   REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
6171   REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
6172   REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
6173   REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
6174   REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
6175   REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
6176   REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
6177 #define REGSET(p,t) \
6178   REGSET31(p,t), REGNUM(p,31,t)
6179 
6180 /* These go into aarch64_reg_hsh hash-table.  */
6181 static const reg_entry reg_names[] = {
6182   /* Integer registers.  */
6183   REGSET31 (x, R_64), REGSET31 (X, R_64),
6184   REGSET31 (w, R_32), REGSET31 (W, R_32),
6185 
6186   REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
6187   REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
6188 
6189   REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
6190   REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
6191 
6192   /* Coprocessor register numbers.  */
6193   REGSET (c, CN), REGSET (C, CN),
6194 
6195   /* Floating-point single precision registers.  */
6196   REGSET (s, FP_S), REGSET (S, FP_S),
6197 
6198   /* Floating-point double precision registers.  */
6199   REGSET (d, FP_D), REGSET (D, FP_D),
6200 
6201   /* Floating-point half precision registers.  */
6202   REGSET (h, FP_H), REGSET (H, FP_H),
6203 
6204   /* Floating-point byte precision registers.  */
6205   REGSET (b, FP_B), REGSET (B, FP_B),
6206 
6207   /* Floating-point quad precision registers.  */
6208   REGSET (q, FP_Q), REGSET (Q, FP_Q),
6209 
6210   /* FP/SIMD registers.  */
6211   REGSET (v, VN), REGSET (V, VN),
6212 };
6213 
6214 #undef REGDEF
6215 #undef REGNUM
6216 #undef REGSET
6217 
6218 #define N 1
6219 #define n 0
6220 #define Z 1
6221 #define z 0
6222 #define C 1
6223 #define c 0
6224 #define V 1
6225 #define v 0
6226 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
6227 static const asm_nzcv nzcv_names[] = {
6228   {"nzcv", B (n, z, c, v)},
6229   {"nzcV", B (n, z, c, V)},
6230   {"nzCv", B (n, z, C, v)},
6231   {"nzCV", B (n, z, C, V)},
6232   {"nZcv", B (n, Z, c, v)},
6233   {"nZcV", B (n, Z, c, V)},
6234   {"nZCv", B (n, Z, C, v)},
6235   {"nZCV", B (n, Z, C, V)},
6236   {"Nzcv", B (N, z, c, v)},
6237   {"NzcV", B (N, z, c, V)},
6238   {"NzCv", B (N, z, C, v)},
6239   {"NzCV", B (N, z, C, V)},
6240   {"NZcv", B (N, Z, c, v)},
6241   {"NZcV", B (N, Z, c, V)},
6242   {"NZCv", B (N, Z, C, v)},
6243   {"NZCV", B (N, Z, C, V)}
6244 };
6245 
6246 #undef N
6247 #undef n
6248 #undef Z
6249 #undef z
6250 #undef C
6251 #undef c
6252 #undef V
6253 #undef v
6254 #undef B
6255 
6256 /* MD interface: bits in the object file.  */
6257 
6258 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
6259    for use in the a.out file, and stores them in the array pointed to by buf.
6260    This knows about the endian-ness of the target machine and does
6261    THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
6262    2 (short) and 4 (long).  Floating numbers are put out as a series of
6263    LITTLENUMS (shorts, here at least).	*/
6264 
6265 void
6266 md_number_to_chars (char *buf, valueT val, int n)
6267 {
6268   if (target_big_endian)
6269     number_to_chars_bigendian (buf, val, n);
6270   else
6271     number_to_chars_littleendian (buf, val, n);
6272 }
6273 
6274 /* MD interface: Sections.  */
6275 
6276 /* Estimate the size of a frag before relaxing.  Assume everything fits in
6277    4 bytes.  */
6278 
6279 int
6280 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
6281 {
6282   fragp->fr_var = 4;
6283   return 4;
6284 }
6285 
6286 /* Round up a section size to the appropriate boundary.	 */
6287 
6288 valueT
6289 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
6290 {
6291   return size;
6292 }
6293 
6294 /* This is called from HANDLE_ALIGN in write.c.	 Fill in the contents
6295    of an rs_align_code fragment.
6296 
6297    Here we fill the frag with the appropriate info for padding the
6298    output stream.  The resulting frag will consist of a fixed (fr_fix)
6299    and of a repeating (fr_var) part.
6300 
6301    The fixed content is always emitted before the repeating content and
6302    these two parts are used as follows in constructing the output:
6303    - the fixed part will be used to align to a valid instruction word
6304      boundary, in case that we start at a misaligned address; as no
6305      executable instruction can live at the misaligned location, we
6306      simply fill with zeros;
6307    - the variable part will be used to cover the remaining padding and
6308      we fill using the AArch64 NOP instruction.
6309 
6310    Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
6311    enough storage space for up to 3 bytes for padding back to a valid
6312    instruction alignment and exactly 4 bytes to store the NOP pattern.  */
6313 
6314 void
6315 aarch64_handle_align (fragS * fragP)
6316 {
6317   /* NOP = d503201f */
6318   /* AArch64 instructions are always little-endian.  */
6319   static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
6320 
6321   int bytes, fix, noop_size;
6322   char *p;
6323 
6324   if (fragP->fr_type != rs_align_code)
6325     return;
6326 
6327   bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
6328   p = fragP->fr_literal + fragP->fr_fix;
6329 
6330 #ifdef OBJ_ELF
6331   gas_assert (fragP->tc_frag_data.recorded);
6332 #endif
6333 
6334   noop_size = sizeof (aarch64_noop);
6335 
6336   fix = bytes & (noop_size - 1);
6337   if (fix)
6338     {
6339 #ifdef OBJ_ELF
6340       insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
6341 #endif
6342       memset (p, 0, fix);
6343       p += fix;
6344       fragP->fr_fix += fix;
6345     }
6346 
6347   if (noop_size)
6348     memcpy (p, aarch64_noop, noop_size);
6349   fragP->fr_var = noop_size;
6350 }
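/* For illustration (a hedged example of the scheme described above): if a
   .p2align 4 request leaves 6 bytes of padding starting at a 2-byte-aligned
   address, the fixed part is 2 zero bytes (to reach a 4-byte boundary) and
   the variable part is the 4-byte NOP pattern, repeated to cover the rest.  */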
6351 
6352 /* Perform target specific initialisation of a frag.
6353    Note - despite the name this initialisation is not done when the frag
6354    is created, but only when its type is assigned.  A frag can be created
6355    and used a long time before its type is set, so beware of assuming that
6356    this initialisation is performed first.  */
6357 
6358 #ifndef OBJ_ELF
6359 void
6360 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
6361 		   int max_chars ATTRIBUTE_UNUSED)
6362 {
6363 }
6364 
6365 #else /* OBJ_ELF is defined.  */
6366 void
6367 aarch64_init_frag (fragS * fragP, int max_chars)
6368 {
6369   /* Record a mapping symbol for alignment frags.  We will delete this
6370      later if the alignment ends up empty.  */
6371   if (!fragP->tc_frag_data.recorded)
6372     fragP->tc_frag_data.recorded = 1;
6373 
6374   switch (fragP->fr_type)
6375     {
6376     case rs_align_test:
6377     case rs_fill:
6378       mapping_state_2 (MAP_DATA, max_chars);
6379       break;
6380     case rs_align:
6381       /* PR 20364: We can get alignment frags in code sections,
6382 	 so do not just assume that we should use the MAP_DATA state.  */
6383       mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
6384       break;
6385     case rs_align_code:
6386       mapping_state_2 (MAP_INSN, max_chars);
6387       break;
6388     default:
6389       break;
6390     }
6391 }
6392 
6393 /* Initialize the DWARF-2 unwind information for this procedure.  */
6394 
6395 void
6396 tc_aarch64_frame_initial_instructions (void)
6397 {
6398   cfi_add_CFA_def_cfa (REG_SP, 0);
6399 }
6400 #endif /* OBJ_ELF */
6401 
6402 /* Convert REGNAME to a DWARF-2 register number.  */
6403 
6404 int
6405 tc_aarch64_regname_to_dw2regnum (char *regname)
6406 {
6407   const reg_entry *reg = parse_reg (&regname);
6408   if (reg == NULL)
6409     return -1;
6410 
6411   switch (reg->type)
6412     {
6413     case REG_TYPE_SP_32:
6414     case REG_TYPE_SP_64:
6415     case REG_TYPE_R_32:
6416     case REG_TYPE_R_64:
6417       return reg->number;
6418 
6419     case REG_TYPE_FP_B:
6420     case REG_TYPE_FP_H:
6421     case REG_TYPE_FP_S:
6422     case REG_TYPE_FP_D:
6423     case REG_TYPE_FP_Q:
6424       return reg->number + 64;
6425 
6426     default:
6427       break;
6428     }
6429   return -1;
6430 }
6431 
6432 /* Implement DWARF2_ADDR_SIZE.  */
6433 
6434 int
6435 aarch64_dwarf2_addr_size (void)
6436 {
6437 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
6438   if (ilp32_p)
6439     return 4;
6440 #endif
6441   return bfd_arch_bits_per_address (stdoutput) / 8;
6442 }
6443 
6444 /* MD interface: Symbol and relocation handling.  */
6445 
6446 /* Return the address within the segment that a PC-relative fixup is
6447    relative to.  For AArch64 PC-relative fixups applied to instructions
6448    are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.  */
6449 
6450 long
6451 md_pcrel_from_section (fixS * fixP, segT seg)
6452 {
6453   offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
6454 
6455   /* If this is pc-relative and we are going to emit a relocation
6456      then we just want to put out any pipeline compensation that the linker
6457      will need.  Otherwise we want to use the calculated base.  */
6458   if (fixP->fx_pcrel
6459       && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
6460 	  || aarch64_force_relocation (fixP)))
6461     base = 0;
6462 
6463   /* AArch64 should be consistent for all pc-relative relocations.  */
6464   return base + AARCH64_PCREL_OFFSET;
6465 }
6466 
6467 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
6468    Otherwise we have no need to default values of symbols.  */
6469 
6470 symbolS *
6471 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6472 {
6473 #ifdef OBJ_ELF
6474   if (name[0] == '_' && name[1] == 'G'
6475       && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6476     {
6477       if (!GOT_symbol)
6478 	{
6479 	  if (symbol_find (name))
6480 	    as_bad (_("GOT already in the symbol table"));
6481 
6482 	  GOT_symbol = symbol_new (name, undefined_section,
6483 				   (valueT) 0, &zero_address_frag);
6484 	}
6485 
6486       return GOT_symbol;
6487     }
6488 #endif
6489 
6490   return 0;
6491 }
6492 
6493 /* Return non-zero if the indicated VALUE has overflowed the maximum
6494    range expressible by an unsigned number with the indicated number of
6495    BITS.  */
6496 
6497 static bfd_boolean
6498 unsigned_overflow (valueT value, unsigned bits)
6499 {
6500   valueT lim;
6501   if (bits >= sizeof (valueT) * 8)
6502     return FALSE;
6503   lim = (valueT) 1 << bits;
6504   return (value >= lim);
6505 }
6506 
6507 
6508 /* Return non-zero if the indicated VALUE has overflowed the maximum
6509    range expressible by a signed number with the indicated number of
6510    BITS.  */
6511 
6512 static bfd_boolean
6513 signed_overflow (offsetT value, unsigned bits)
6514 {
6515   offsetT lim;
6516   if (bits >= sizeof (offsetT) * 8)
6517     return FALSE;
6518   lim = (offsetT) 1 << (bits - 1);
6519   return (value < -lim || value >= lim);
6520 }
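/* For illustration of the two helpers above:

     unsigned_overflow (0xffff, 16)	-> FALSE
     unsigned_overflow (0x10000, 16)	-> TRUE
     signed_overflow (-0x100000, 21)	-> FALSE  (smallest representable value)
     signed_overflow (0x100000, 21)	-> TRUE   (one past the largest)  */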
6521 
6522 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6523    unsigned immediate offset load/store instruction, try to encode it as
6524    an unscaled, 9-bit, signed immediate offset load/store instruction.
6525    Return TRUE if it is successful; otherwise return FALSE.
6526 
6527    As a programmer-friendly feature, the assembler will generate LDUR/STUR
6528    instructions in response to the standard LDR/STR mnemonics when the
6529    immediate offset is unambiguous, i.e. when it is negative or unaligned.  */
6530 
6531 static bfd_boolean
6532 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6533 {
6534   int idx;
6535   enum aarch64_op new_op;
6536   const aarch64_opcode *new_opcode;
6537 
6538   gas_assert (instr->opcode->iclass == ldst_pos);
6539 
6540   switch (instr->opcode->op)
6541     {
6542     case OP_LDRB_POS:new_op = OP_LDURB; break;
6543     case OP_STRB_POS: new_op = OP_STURB; break;
6544     case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6545     case OP_LDRH_POS: new_op = OP_LDURH; break;
6546     case OP_STRH_POS: new_op = OP_STURH; break;
6547     case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6548     case OP_LDR_POS: new_op = OP_LDUR; break;
6549     case OP_STR_POS: new_op = OP_STUR; break;
6550     case OP_LDRF_POS: new_op = OP_LDURV; break;
6551     case OP_STRF_POS: new_op = OP_STURV; break;
6552     case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6553     case OP_PRFM_POS: new_op = OP_PRFUM; break;
6554     default: new_op = OP_NIL; break;
6555     }
6556 
6557   if (new_op == OP_NIL)
6558     return FALSE;
6559 
6560   new_opcode = aarch64_get_opcode (new_op);
6561   gas_assert (new_opcode != NULL);
6562 
6563   DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6564 	       instr->opcode->op, new_opcode->op);
6565 
6566   aarch64_replace_opcode (instr, new_opcode);
6567 
6568   /* Clear up the ADDR_SIMM9's qualifier; otherwise the
6569      qualifier matching may fail because the out-of-date qualifier will
6570      prevent the operand being updated with a new and correct qualifier.  */
6571   idx = aarch64_operand_index (instr->opcode->operands,
6572 			       AARCH64_OPND_ADDR_SIMM9);
6573   gas_assert (idx == 1);
6574   instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6575 
6576   DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6577 
6578   if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6579     return FALSE;
6580 
6581   return TRUE;
6582 }
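/* For illustration (hedged examples of the conversion above):

     ldr	x0, [x1, #1]	// offset not a multiple of 8 -> encoded as LDUR
     str	w0, [x1, #-4]	// negative offset -> encoded as STUR  */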
6583 
6584 /* Called by fix_insn to fix a MOV immediate alias instruction.
6585 
6586    Operand for a generic move immediate instruction, which is an alias
6587    instruction that generates a single MOVZ, MOVN or ORR instruction to load
6588    a 32-bit/64-bit immediate value into a general register.  An assembler error
6589    shall result if the immediate cannot be created by a single one of these
6590    instructions.  If there is a choice, then to ensure reversibility an
6591    assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */
6592 
6593 static void
6594 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6595 {
6596   const aarch64_opcode *opcode;
6597 
6598   /* Need to check if the destination is SP/ZR.  The check has to be done
6599      before any aarch64_replace_opcode.  */
6600   int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6601   int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6602 
6603   instr->operands[1].imm.value = value;
6604   instr->operands[1].skip = 0;
6605 
6606   if (try_mov_wide_p)
6607     {
6608       /* Try the MOVZ alias.  */
6609       opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6610       aarch64_replace_opcode (instr, opcode);
6611       if (aarch64_opcode_encode (instr->opcode, instr,
6612 				 &instr->value, NULL, NULL))
6613 	{
6614 	  put_aarch64_insn (buf, instr->value);
6615 	  return;
6616 	}
6617       /* Try the MOVN alias.  */
6618       opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6619       aarch64_replace_opcode (instr, opcode);
6620       if (aarch64_opcode_encode (instr->opcode, instr,
6621 				 &instr->value, NULL, NULL))
6622 	{
6623 	  put_aarch64_insn (buf, instr->value);
6624 	  return;
6625 	}
6626     }
6627 
6628   if (try_mov_bitmask_p)
6629     {
6630       /* Try the ORR alias.  */
6631       opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6632       aarch64_replace_opcode (instr, opcode);
6633       if (aarch64_opcode_encode (instr->opcode, instr,
6634 				 &instr->value, NULL, NULL))
6635 	{
6636 	  put_aarch64_insn (buf, instr->value);
6637 	  return;
6638 	}
6639     }
6640 
6641   as_bad_where (fixP->fx_file, fixP->fx_line,
6642 		_("immediate cannot be moved by a single instruction"));
6643 }
6644 
6645 /* An instruction operand which is immediate-related may have a symbol used
6646    in the assembly, e.g.
6647 
6648      mov     w0, u32
6649      .set    u32,    0x00ffff00
6650 
6651    At the time when the assembly instruction is parsed, a referenced symbol,
6652    like 'u32' in the above example may not have been seen; a fixS is created
6653    in such a case and is handled here after symbols have been resolved.
6654    Instruction is fixed up with VALUE using the information in *FIXP plus
6655    extra information in FLAGS.
6656 
6657    This function is called by md_apply_fix to fix up instructions that need
6658    a fix-up described above but does not involve any linker-time relocation.  */
6659 
6660 static void
6661 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6662 {
6663   int idx;
6664   uint32_t insn;
6665   char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6666   enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6667   aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6668 
6669   if (new_inst)
6670     {
6671       /* Now the instruction is about to be fixed-up, so the operand that
6672 	 was previously marked as 'ignored' needs to be unmarked in order
6673 	 to get the encoding done properly.  */
6674       idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6675       new_inst->operands[idx].skip = 0;
6676     }
6677 
6678   gas_assert (opnd != AARCH64_OPND_NIL);
6679 
6680   switch (opnd)
6681     {
6682     case AARCH64_OPND_EXCEPTION:
6683       if (unsigned_overflow (value, 16))
6684 	as_bad_where (fixP->fx_file, fixP->fx_line,
6685 		      _("immediate out of range"));
6686       insn = get_aarch64_insn (buf);
6687       insn |= encode_svc_imm (value);
6688       put_aarch64_insn (buf, insn);
6689       break;
6690 
6691     case AARCH64_OPND_AIMM:
6692       /* ADD or SUB with immediate.
6693 	 NOTE this assumes we come here with an add/sub shifted reg encoding
6694 		  3  322|2222|2  2  2 21111 111111
6695 		  1  098|7654|3  2  1 09876 543210 98765 43210
6696 	 0b000000 sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
6697 	 2b000000 sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
6698 	 4b000000 sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
6699 	 6b000000 sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
6700 	 ->
6701 		  3  322|2222|2 2   221111111111
6702 		  1  098|7654|3 2   109876543210 98765 43210
6703 	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
6704 	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
6705 	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
6706 	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
6707 	 Fields sf Rn Rd are already set.  */
6708       insn = get_aarch64_insn (buf);
6709       if (value < 0)
6710 	{
6711 	  /* Add <-> sub.  */
6712 	  insn = reencode_addsub_switch_add_sub (insn);
6713 	  value = -value;
6714 	}
6715 
6716       if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6717 	  && unsigned_overflow (value, 12))
6718 	{
6719 	  /* Try to shift the value by 12 to make it fit.  */
6720 	  if (((value >> 12) << 12) == value
6721 	      && ! unsigned_overflow (value, 12 + 12))
6722 	    {
6723 	      value >>= 12;
6724 	      insn |= encode_addsub_imm_shift_amount (1);
6725 	    }
6726 	}
6727 
6728       if (unsigned_overflow (value, 12))
6729 	as_bad_where (fixP->fx_file, fixP->fx_line,
6730 		      _("immediate out of range"));
6731 
6732       insn |= encode_addsub_imm (value);
6733 
6734       put_aarch64_insn (buf, insn);
6735       break;
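      /* For illustration (a hedged sketch of the fix-ups above), with "off"
	 being a symbol that is only resolved after parsing:

	   add	x0, x1, #off
	   .set	off, -8			// flipped here to sub x0, x1, #8

	 A resolved value such as 0x5000 would instead be encoded as
	 "add x0, x1, #5, lsl #12", provided no explicit shift was written.  */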
6736 
6737     case AARCH64_OPND_SIMD_IMM:
6738     case AARCH64_OPND_SIMD_IMM_SFT:
6739     case AARCH64_OPND_LIMM:
6740       /* Bit mask immediate.  */
6741       gas_assert (new_inst != NULL);
6742       idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6743       new_inst->operands[idx].imm.value = value;
6744       if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6745 				 &new_inst->value, NULL, NULL))
6746 	put_aarch64_insn (buf, new_inst->value);
6747       else
6748 	as_bad_where (fixP->fx_file, fixP->fx_line,
6749 		      _("invalid immediate"));
6750       break;
6751 
6752     case AARCH64_OPND_HALF:
6753       /* 16-bit unsigned immediate.  */
6754       if (unsigned_overflow (value, 16))
6755 	as_bad_where (fixP->fx_file, fixP->fx_line,
6756 		      _("immediate out of range"));
6757       insn = get_aarch64_insn (buf);
6758       insn |= encode_movw_imm (value & 0xffff);
6759       put_aarch64_insn (buf, insn);
6760       break;
6761 
6762     case AARCH64_OPND_IMM_MOV:
6763       /* Operand for a generic move immediate instruction, which is
6764 	 an alias instruction that generates a single MOVZ, MOVN or ORR
6765 	 instruction to loads a 32-bit/64-bit immediate value into general
6766 	 instruction to load a 32-bit/64-bit immediate value into a general
6767 	 register.  An assembler error shall result if the immediate cannot be
6768 	 created by a single one of these instructions.  If there is a choice,
6769 	 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
6770       gas_assert (new_inst != NULL);
6771       fix_mov_imm_insn (fixP, buf, new_inst, value);
6772       break;
6773 
6774     case AARCH64_OPND_ADDR_SIMM7:
6775     case AARCH64_OPND_ADDR_SIMM9:
6776     case AARCH64_OPND_ADDR_SIMM9_2:
6777     case AARCH64_OPND_ADDR_UIMM12:
6778       /* Immediate offset in an address.  */
6779       insn = get_aarch64_insn (buf);
6780 
6781       gas_assert (new_inst != NULL && new_inst->value == insn);
6782       gas_assert (new_inst->opcode->operands[1] == opnd
6783 		  || new_inst->opcode->operands[2] == opnd);
6784 
6785       /* Get the index of the address operand.  */
6786       if (new_inst->opcode->operands[1] == opnd)
6787 	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
6788 	idx = 1;
6789       else
6790 	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
6791 	idx = 2;
6792 
6793       /* Update the resolved offset value.  */
6794       new_inst->operands[idx].addr.offset.imm = value;
6795 
6796       /* Encode/fix-up.  */
6797       if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6798 				 &new_inst->value, NULL, NULL))
6799 	{
6800 	  put_aarch64_insn (buf, new_inst->value);
6801 	  break;
6802 	}
6803       else if (new_inst->opcode->iclass == ldst_pos
6804 	       && try_to_encode_as_unscaled_ldst (new_inst))
6805 	{
6806 	  put_aarch64_insn (buf, new_inst->value);
6807 	  break;
6808 	}
6809 
6810       as_bad_where (fixP->fx_file, fixP->fx_line,
6811 		    _("immediate offset out of range"));
6812       break;
6813 
6814     default:
6815       gas_assert (0);
6816       as_fatal (_("unhandled operand code %d"), opnd);
6817     }
6818 }
6819 
6820 /* Apply a fixup (fixP) to segment data, once it has been determined
6821    by our caller that we have all the info we need to fix it up.
6822 
6823    Parameter valP is the pointer to the value of the bits.  */
6824 
6825 void
6826 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6827 {
6828   offsetT value = *valP;
6829   uint32_t insn;
6830   char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6831   int scale;
6832   unsigned flags = fixP->fx_addnumber;
6833 
6834   DEBUG_TRACE ("\n\n");
6835   DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6836   DEBUG_TRACE ("Enter md_apply_fix");
6837 
6838   gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6839 
6840   /* Note whether this will delete the relocation.  */
6841 
6842   if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6843     fixP->fx_done = 1;
6844 
6845   /* Process the relocations.  */
6846   switch (fixP->fx_r_type)
6847     {
6848     case BFD_RELOC_NONE:
6849       /* This will need to go in the object file.  */
6850       fixP->fx_done = 0;
6851       break;
6852 
6853     case BFD_RELOC_8:
6854     case BFD_RELOC_8_PCREL:
6855       if (fixP->fx_done || !seg->use_rela_p)
6856 	md_number_to_chars (buf, value, 1);
6857       break;
6858 
6859     case BFD_RELOC_16:
6860     case BFD_RELOC_16_PCREL:
6861       if (fixP->fx_done || !seg->use_rela_p)
6862 	md_number_to_chars (buf, value, 2);
6863       break;
6864 
6865     case BFD_RELOC_32:
6866     case BFD_RELOC_32_PCREL:
6867       if (fixP->fx_done || !seg->use_rela_p)
6868 	md_number_to_chars (buf, value, 4);
6869       break;
6870 
6871     case BFD_RELOC_64:
6872     case BFD_RELOC_64_PCREL:
6873       if (fixP->fx_done || !seg->use_rela_p)
6874 	md_number_to_chars (buf, value, 8);
6875       break;
6876 
6877     case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6878       /* We claim that these fixups have been processed here, even if
6879          in fact we generate an error because we do not have a reloc
6880          for them, so tc_gen_reloc() will reject them.  */
6881       fixP->fx_done = 1;
6882       if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6883 	{
6884 	  as_bad_where (fixP->fx_file, fixP->fx_line,
6885 			_("undefined symbol %s used as an immediate value"),
6886 			S_GET_NAME (fixP->fx_addsy));
6887 	  goto apply_fix_return;
6888 	}
6889       fix_insn (fixP, flags, value);
6890       break;
6891 
6892     case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6893       if (fixP->fx_done || !seg->use_rela_p)
6894 	{
6895 	  if (value & 3)
6896 	    as_bad_where (fixP->fx_file, fixP->fx_line,
6897 			  _("pc-relative load offset not word aligned"));
6898 	  if (signed_overflow (value, 21))
6899 	    as_bad_where (fixP->fx_file, fixP->fx_line,
6900 			  _("pc-relative load offset out of range"));
6901 	  insn = get_aarch64_insn (buf);
6902 	  insn |= encode_ld_lit_ofs_19 (value >> 2);
6903 	  put_aarch64_insn (buf, insn);
6904 	}
6905       break;
6906 
6907     case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6908       if (fixP->fx_done || !seg->use_rela_p)
6909 	{
6910 	  if (signed_overflow (value, 21))
6911 	    as_bad_where (fixP->fx_file, fixP->fx_line,
6912 			  _("pc-relative address offset out of range"));
6913 	  insn = get_aarch64_insn (buf);
6914 	  insn |= encode_adr_imm (value);
6915 	  put_aarch64_insn (buf, insn);
6916 	}
6917       break;
6918 
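    /* After the word-alignment check, the branch fixups below carry a
       21-bit signed byte offset for BRANCH19 (+/-1MiB), a 16-bit one for
       TSTBR14 (+/-32KiB) and a 28-bit one for CALL26/JUMP26 (+/-128MiB);
       these are the ranges the signed_overflow checks enforce.  */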
6919     case BFD_RELOC_AARCH64_BRANCH19:
6920       if (fixP->fx_done || !seg->use_rela_p)
6921 	{
6922 	  if (value & 3)
6923 	    as_bad_where (fixP->fx_file, fixP->fx_line,
6924 			  _("conditional branch target not word aligned"));
6925 	  if (signed_overflow (value, 21))
6926 	    as_bad_where (fixP->fx_file, fixP->fx_line,
6927 			  _("conditional branch out of range"));
6928 	  insn = get_aarch64_insn (buf);
6929 	  insn |= encode_cond_branch_ofs_19 (value >> 2);
6930 	  put_aarch64_insn (buf, insn);
6931 	}
6932       break;
6933 
6934     case BFD_RELOC_AARCH64_TSTBR14:
6935       if (fixP->fx_done || !seg->use_rela_p)
6936 	{
6937 	  if (value & 3)
6938 	    as_bad_where (fixP->fx_file, fixP->fx_line,
6939 			  _("conditional branch target not word aligned"));
6940 	  if (signed_overflow (value, 16))
6941 	    as_bad_where (fixP->fx_file, fixP->fx_line,
6942 			  _("conditional branch out of range"));
6943 	  insn = get_aarch64_insn (buf);
6944 	  insn |= encode_tst_branch_ofs_14 (value >> 2);
6945 	  put_aarch64_insn (buf, insn);
6946 	}
6947       break;
6948 
6949     case BFD_RELOC_AARCH64_CALL26:
6950     case BFD_RELOC_AARCH64_JUMP26:
6951       if (fixP->fx_done || !seg->use_rela_p)
6952 	{
6953 	  if (value & 3)
6954 	    as_bad_where (fixP->fx_file, fixP->fx_line,
6955 			  _("branch target not word aligned"));
6956 	  if (signed_overflow (value, 28))
6957 	    as_bad_where (fixP->fx_file, fixP->fx_line,
6958 			  _("branch out of range"));
6959 	  insn = get_aarch64_insn (buf);
6960 	  insn |= encode_branch_ofs_26 (value >> 2);
6961 	  put_aarch64_insn (buf, insn);
6962 	}
6963       break;
6964 
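    /* Each MOVW group relocation selects one 16-bit chunk of the value:
       G0 takes bits [15:0], G1 bits [31:16], G2 bits [47:32] and G3 bits
       [63:48].  For example, "movz x0, #:abs_g2:sym" with sym resolving to
       0x0000123456789abc inserts 0x1234 into the imm16 field.  The _NC
       variants skip the overflow check; the _S variants may be rewritten
       from MOVZ to MOVN (or back) for negative values, as done below.  */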
6965     case BFD_RELOC_AARCH64_MOVW_G0:
6966     case BFD_RELOC_AARCH64_MOVW_G0_NC:
6967     case BFD_RELOC_AARCH64_MOVW_G0_S:
6968     case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
6969       scale = 0;
6970       goto movw_common;
6971     case BFD_RELOC_AARCH64_MOVW_G1:
6972     case BFD_RELOC_AARCH64_MOVW_G1_NC:
6973     case BFD_RELOC_AARCH64_MOVW_G1_S:
6974     case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
6975       scale = 16;
6976       goto movw_common;
6977     case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6978       scale = 0;
6979       S_SET_THREAD_LOCAL (fixP->fx_addsy);
6980       /* Should always be exported to object file, see
6981 	 aarch64_force_relocation().  */
6982       gas_assert (!fixP->fx_done);
6983       gas_assert (seg->use_rela_p);
6984       goto movw_common;
6985     case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6986       scale = 16;
6987       S_SET_THREAD_LOCAL (fixP->fx_addsy);
6988       /* Should always be exported to object file, see
6989 	 aarch64_force_relocation().  */
6990       gas_assert (!fixP->fx_done);
6991       gas_assert (seg->use_rela_p);
6992       goto movw_common;
6993     case BFD_RELOC_AARCH64_MOVW_G2:
6994     case BFD_RELOC_AARCH64_MOVW_G2_NC:
6995     case BFD_RELOC_AARCH64_MOVW_G2_S:
6996       scale = 32;
6997       goto movw_common;
6998     case BFD_RELOC_AARCH64_MOVW_G3:
6999       scale = 48;
7000     movw_common:
7001       if (fixP->fx_done || !seg->use_rela_p)
7002 	{
7003 	  insn = get_aarch64_insn (buf);
7004 
7005 	  if (!fixP->fx_done)
7006 	    {
7007 	      /* REL signed addend must fit in 16 bits */
7008 	      if (signed_overflow (value, 16))
7009 		as_bad_where (fixP->fx_file, fixP->fx_line,
7010 			      _("offset out of range"));
7011 	    }
7012 	  else
7013 	    {
7014 	      /* Check for overflow and scale. */
7015 	      switch (fixP->fx_r_type)
7016 		{
7017 		case BFD_RELOC_AARCH64_MOVW_G0:
7018 		case BFD_RELOC_AARCH64_MOVW_G1:
7019 		case BFD_RELOC_AARCH64_MOVW_G2:
7020 		case BFD_RELOC_AARCH64_MOVW_G3:
7021 		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7022 		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7023 		  if (unsigned_overflow (value, scale + 16))
7024 		    as_bad_where (fixP->fx_file, fixP->fx_line,
7025 				  _("unsigned value out of range"));
7026 		  break;
7027 		case BFD_RELOC_AARCH64_MOVW_G0_S:
7028 		case BFD_RELOC_AARCH64_MOVW_G1_S:
7029 		case BFD_RELOC_AARCH64_MOVW_G2_S:
7030 		  /* NOTE: We can only come here with movz or movn. */
7031 		  if (signed_overflow (value, scale + 16))
7032 		    as_bad_where (fixP->fx_file, fixP->fx_line,
7033 				  _("signed value out of range"));
7034 		  if (value < 0)
7035 		    {
7036 		      /* Force use of MOVN.  */
7037 		      value = ~value;
7038 		      insn = reencode_movzn_to_movn (insn);
7039 		    }
7040 		  else
7041 		    {
7042 		      /* Force use of MOVZ.  */
7043 		      insn = reencode_movzn_to_movz (insn);
7044 		    }
7045 		  break;
7046 		default:
7047 		  /* Unchecked relocations.  */
7048 		  break;
7049 		}
7050 	      value >>= scale;
7051 	    }
7052 
7053 	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
7054 	  insn |= encode_movw_imm (value & 0xffff);
7055 
7056 	  put_aarch64_insn (buf, insn);
7057 	}
7058       break;
7059 
7060     case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7061       fixP->fx_r_type = (ilp32_p
7062 			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
7063 			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7064       S_SET_THREAD_LOCAL (fixP->fx_addsy);
7065       /* Should always be exported to object file, see
7066 	 aarch64_force_relocation().  */
7067       gas_assert (!fixP->fx_done);
7068       gas_assert (seg->use_rela_p);
7069       break;
7070 
7071     case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7072       fixP->fx_r_type = (ilp32_p
7073 			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
7074 			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
7075       S_SET_THREAD_LOCAL (fixP->fx_addsy);
7076       /* Should always be exported to object file, see
7077 	 aarch64_force_relocation().  */
7078       gas_assert (!fixP->fx_done);
7079       gas_assert (seg->use_rela_p);
7080       break;
7081 
7082     case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7083     case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7084     case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7085     case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7086     case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7087     case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7088     case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7089     case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7090     case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7091     case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7092     case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7093     case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7094     case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7095     case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7096     case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7097     case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7098     case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7099     case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7100     case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7101     case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7102     case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7103     case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7104     case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7105     case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7106     case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7107     case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7108     case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7109     case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7110     case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7111     case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7112     case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7113     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7114     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7115     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7116     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7117     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7118     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7119     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7120     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7121     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7122     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7123     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7124     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7125     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7126       S_SET_THREAD_LOCAL (fixP->fx_addsy);
7127       /* Should always be exported to object file, see
7128 	 aarch64_force_relocation().  */
7129       gas_assert (!fixP->fx_done);
7130       gas_assert (seg->use_rela_p);
7131       break;
7132 
7133     case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7134       /* Should always be exported to object file, see
7135 	 aarch64_force_relocation().  */
7136       fixP->fx_r_type = (ilp32_p
7137 			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
7138 			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
7139       gas_assert (!fixP->fx_done);
7140       gas_assert (seg->use_rela_p);
7141       break;
7142 
7143     case BFD_RELOC_AARCH64_ADD_LO12:
7144     case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7145     case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7146     case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7147     case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7148     case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7149     case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7150     case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7151     case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7152     case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7153     case BFD_RELOC_AARCH64_LDST128_LO12:
7154     case BFD_RELOC_AARCH64_LDST16_LO12:
7155     case BFD_RELOC_AARCH64_LDST32_LO12:
7156     case BFD_RELOC_AARCH64_LDST64_LO12:
7157     case BFD_RELOC_AARCH64_LDST8_LO12:
7158       /* Should always be exported to object file, see
7159 	 aarch64_force_relocation().  */
7160       gas_assert (!fixP->fx_done);
7161       gas_assert (seg->use_rela_p);
7162       break;
7163 
7164     case BFD_RELOC_AARCH64_TLSDESC_ADD:
7165     case BFD_RELOC_AARCH64_TLSDESC_CALL:
7166     case BFD_RELOC_AARCH64_TLSDESC_LDR:
7167       break;
7168 
7169     case BFD_RELOC_UNUSED:
7170       /* An error will already have been reported.  */
7171       break;
7172 
7173     default:
7174       as_bad_where (fixP->fx_file, fixP->fx_line,
7175 		    _("unexpected %s fixup"),
7176 		    bfd_get_reloc_code_name (fixP->fx_r_type));
7177       break;
7178     }
7179 
7180 apply_fix_return:
7181   /* Free the allocated struct aarch64_inst.
7182      N.B. currently only a very limited number of fix-up types actually use
7183      this field, so the impact on performance should be minimal.  */
7184   if (fixP->tc_fix_data.inst != NULL)
7185     free (fixP->tc_fix_data.inst);
7186 
7187   return;
7188 }
7189 
7190 /* Translate internal representation of relocation info to BFD target
7191    format.  */
7192 
7193 arelent *
7194 tc_gen_reloc (asection * section, fixS * fixp)
7195 {
7196   arelent *reloc;
7197   bfd_reloc_code_real_type code;
7198 
7199   reloc = XNEW (arelent);
7200 
7201   reloc->sym_ptr_ptr = XNEW (asymbol *);
7202   *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
7203   reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
7204 
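  /* AArch64 ELF uses RELA-style relocations, so for a PC-relative fixup the
     addend stored in the object file must not include the address of the
     field being fixed up; md_pcrel_from_section () returns that address and
     it is subtracted here.  For REL-style output the addend slot simply
     records the location instead.  */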
7205   if (fixp->fx_pcrel)
7206     {
7207       if (section->use_rela_p)
7208 	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
7209       else
7210 	fixp->fx_offset = reloc->address;
7211     }
7212   reloc->addend = fixp->fx_offset;
7213 
7214   code = fixp->fx_r_type;
7215   switch (code)
7216     {
7217     case BFD_RELOC_16:
7218       if (fixp->fx_pcrel)
7219 	code = BFD_RELOC_16_PCREL;
7220       break;
7221 
7222     case BFD_RELOC_32:
7223       if (fixp->fx_pcrel)
7224 	code = BFD_RELOC_32_PCREL;
7225       break;
7226 
7227     case BFD_RELOC_64:
7228       if (fixp->fx_pcrel)
7229 	code = BFD_RELOC_64_PCREL;
7230       break;
7231 
7232     default:
7233       break;
7234     }
7235 
7236   reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
7237   if (reloc->howto == NULL)
7238     {
7239       as_bad_where (fixp->fx_file, fixp->fx_line,
7240 		    _
7241 		    ("cannot represent %s relocation in this object file format"),
7242 		    bfd_get_reloc_code_name (code));
7243       return NULL;
7244     }
7245 
7246   return reloc;
7247 }
7248 
7249 /* This fix_new is called by cons via TC_CONS_FIX_NEW.	*/
7250 
7251 void
7252 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
7253 {
7254   bfd_reloc_code_real_type type;
7255   int pcrel = 0;
7256 
7257   /* Pick a reloc.
7258      FIXME: @@ Should look at CPU word size.  */
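  /* For example, a ".word sym" directive reaches here with SIZE == 4 and
     yields a BFD_RELOC_32 fixup, while ".xword sym" (SIZE == 8) yields a
     BFD_RELOC_64 fixup.  */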
7259   switch (size)
7260     {
7261     case 1:
7262       type = BFD_RELOC_8;
7263       break;
7264     case 2:
7265       type = BFD_RELOC_16;
7266       break;
7267     case 4:
7268       type = BFD_RELOC_32;
7269       break;
7270     case 8:
7271       type = BFD_RELOC_64;
7272       break;
7273     default:
7274       as_bad (_("cannot do %u-byte relocation"), size);
7275       type = BFD_RELOC_UNUSED;
7276       break;
7277     }
7278 
7279   fix_new_exp (frag, where, (int) size, exp, pcrel, type);
7280 }
7281 
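/* Decide whether the relocation for a fixup must be kept in the object file
   for the linker to resolve.  Returning non-zero forces the relocation to be
   emitted; returning zero lets GAS resolve the fixup locally when it can,
   with generic_force_reloc () handling the remaining cases.  */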
7282 int
7283 aarch64_force_relocation (struct fix *fixp)
7284 {
7285   switch (fixp->fx_r_type)
7286     {
7287     case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7288       /* Perform these "immediate" internal relocations
7289          even if the symbol is extern or weak.  */
7290       return 0;
7291 
7292     case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7293     case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7294     case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7295       /* Pseudo relocs that need to be fixed up according to
7296 	 ilp32_p.  */
7297       return 0;
7298 
7299     case BFD_RELOC_AARCH64_ADD_LO12:
7300     case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7301     case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7302     case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7303     case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7304     case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7305     case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7306     case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7307     case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7308     case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7309     case BFD_RELOC_AARCH64_LDST128_LO12:
7310     case BFD_RELOC_AARCH64_LDST16_LO12:
7311     case BFD_RELOC_AARCH64_LDST32_LO12:
7312     case BFD_RELOC_AARCH64_LDST64_LO12:
7313     case BFD_RELOC_AARCH64_LDST8_LO12:
7314     case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7315     case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7316     case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7317     case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7318     case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7319     case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7320     case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7321     case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7322     case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7323     case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7324     case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7325     case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7326     case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7327     case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7328     case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7329     case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7330     case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7331     case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7332     case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7333     case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7334     case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7335     case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7336     case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7337     case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7338     case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7339     case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7340     case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7341     case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7342     case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7343     case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7344     case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7345     case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7346     case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7347     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7348     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7349     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7350     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7351     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7352     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7353     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7354     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7355     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7356     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7357     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7358     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7359     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7360       /* Always leave these relocations for the linker.  */
7361       return 1;
7362 
7363     default:
7364       break;
7365     }
7366 
7367   return generic_force_reloc (fixp);
7368 }
7369 
7370 #ifdef OBJ_ELF
7371 
7372 const char *
7373 elf64_aarch64_target_format (void)
7374 {
7375   if (strcmp (TARGET_OS, "cloudabi") == 0)
7376     {
7377       /* FIXME: What to do for ilp32_p ?  */
7378       return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
7379     }
7380   if (target_big_endian)
7381     return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
7382   else
7383     return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
7384 }
7385 
7386 void
7387 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
7388 {
7389   elf_frob_symbol (symp, puntp);
7390 }
7391 #endif
7392 
7393 /* MD interface: Finalization.	*/
7394 
7395 /* A good place to do this, although this was probably not intended
7396    for this kind of use.  We need to dump the literal pool before
7397    references are made to a null symbol pointer.  */
7398 
7399 void
7400 aarch64_cleanup (void)
7401 {
7402   literal_pool *pool;
7403 
7404   for (pool = list_of_pools; pool; pool = pool->next)
7405     {
7406       /* Put it at the end of the relevant section.  */
7407       subseg_set (pool->section, pool->sub_section);
7408       s_ltorg (0);
7409     }
7410 }
7411 
7412 #ifdef OBJ_ELF
7413 /* Remove any excess mapping symbols generated for alignment frags in
7414    SEC.  We may have created a mapping symbol before a zero byte
7415    alignment; remove it if there's a mapping symbol after the
7416    alignment.  */
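/* Mapping symbols are the ELF "$x" (A64 code) and "$d" (data) markers; two
   markers at the same address carry no extra information, so the redundant
   one can safely be removed.  */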
7417 static void
7418 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
7419 		       void *dummy ATTRIBUTE_UNUSED)
7420 {
7421   segment_info_type *seginfo = seg_info (sec);
7422   fragS *fragp;
7423 
7424   if (seginfo == NULL || seginfo->frchainP == NULL)
7425     return;
7426 
7427   for (fragp = seginfo->frchainP->frch_root;
7428        fragp != NULL; fragp = fragp->fr_next)
7429     {
7430       symbolS *sym = fragp->tc_frag_data.last_map;
7431       fragS *next = fragp->fr_next;
7432 
7433       /* Variable-sized frags have been converted to fixed size by
7434          this point.  But if this was variable-sized to start with,
7435          there will be a fixed-size frag after it.  So don't handle
7436          next == NULL.  */
7437       if (sym == NULL || next == NULL)
7438 	continue;
7439 
7440       if (S_GET_VALUE (sym) < next->fr_address)
7441 	/* Not at the end of this frag.  */
7442 	continue;
7443       know (S_GET_VALUE (sym) == next->fr_address);
7444 
7445       do
7446 	{
7447 	  if (next->tc_frag_data.first_map != NULL)
7448 	    {
7449 	      /* Next frag starts with a mapping symbol.  Discard this
7450 	         one.  */
7451 	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7452 	      break;
7453 	    }
7454 
7455 	  if (next->fr_next == NULL)
7456 	    {
7457 	      /* This mapping symbol is at the end of the section.  Discard
7458 	         it.  */
7459 	      know (next->fr_fix == 0 && next->fr_var == 0);
7460 	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7461 	      break;
7462 	    }
7463 
7464 	  /* As long as we have empty frags without any mapping symbols,
7465 	     keep looking.  */
7466 	  /* If the next frag is non-empty and does not start with a
7467 	     mapping symbol, then this mapping symbol is required.  */
7468 	  if (next->fr_address != next->fr_next->fr_address)
7469 	    break;
7470 
7471 	  next = next->fr_next;
7472 	}
7473       while (next != NULL);
7474     }
7475 }
7476 #endif
7477 
7478 /* Adjust the symbol table.  */
7479 
7480 void
7481 aarch64_adjust_symtab (void)
7482 {
7483 #ifdef OBJ_ELF
7484   /* Remove any overlapping mapping symbols generated by alignment frags.  */
7485   bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
7486   /* Now do generic ELF adjustments.  */
7487   elf_adjust_symtab ();
7488 #endif
7489 }
7490 
7491 static void
7492 checked_hash_insert (struct hash_control *table, const char *key, void *value)
7493 {
7494   const char *hash_err;
7495 
7496   hash_err = hash_insert (table, key, value);
7497   if (hash_err)
7498     printf ("Internal Error:  Can't hash %s\n", key);
7499 }
7500 
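/* Populate aarch64_ops_hsh from the libopcodes opcode table.  Each opcode
   entry is wrapped in a "templates" node; entries that share a mnemonic are
   chained onto the node already hashed under that name, so one lookup yields
   every candidate encoding for a given mnemonic.  */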
7501 static void
7502 fill_instruction_hash_table (void)
7503 {
7504   aarch64_opcode *opcode = aarch64_opcode_table;
7505 
7506   while (opcode->name != NULL)
7507     {
7508       templates *templ, *new_templ;
7509       templ = hash_find (aarch64_ops_hsh, opcode->name);
7510 
7511       new_templ = XNEW (templates);
7512       new_templ->opcode = opcode;
7513       new_templ->next = NULL;
7514 
7515       if (!templ)
7516 	checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
7517       else
7518 	{
7519 	  new_templ->next = templ->next;
7520 	  templ->next = new_templ;
7521 	}
7522       ++opcode;
7523     }
7524 }
7525 
7526 static inline void
7527 convert_to_upper (char *dst, const char *src, size_t num)
7528 {
7529   unsigned int i;
7530   for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
7531     *dst = TOUPPER (*src);
7532   *dst = '\0';
7533 }
7534 
7535 /* Assume STR points to a lower-case string; allocate, convert and return
7536    the corresponding upper-case string.  */
7537 static inline const char*
7538 get_upper_str (const char *str)
7539 {
7540   char *ret;
7541   size_t len = strlen (str);
7542   ret = XNEWVEC (char, len + 1);
7543   convert_to_upper (ret, str, len);
7544   return ret;
7545 }
7546 
7547 /* MD interface: Initialization.  */
7548 
7549 void
7550 md_begin (void)
7551 {
7552   unsigned mach;
7553   unsigned int i;
7554 
7555   if ((aarch64_ops_hsh = hash_new ()) == NULL
7556       || (aarch64_cond_hsh = hash_new ()) == NULL
7557       || (aarch64_shift_hsh = hash_new ()) == NULL
7558       || (aarch64_sys_regs_hsh = hash_new ()) == NULL
7559       || (aarch64_pstatefield_hsh = hash_new ()) == NULL
7560       || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
7561       || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
7562       || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
7563       || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
7564       || (aarch64_reg_hsh = hash_new ()) == NULL
7565       || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7566       || (aarch64_nzcv_hsh = hash_new ()) == NULL
7567       || (aarch64_pldop_hsh = hash_new ()) == NULL
7568       || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
7569     as_fatal (_("virtual memory exhausted"));
7570 
7571   fill_instruction_hash_table ();
7572 
7573   for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7574     checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7575 			 (void *) (aarch64_sys_regs + i));
7576 
7577   for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7578     checked_hash_insert (aarch64_pstatefield_hsh,
7579 			 aarch64_pstatefields[i].name,
7580 			 (void *) (aarch64_pstatefields + i));
7581 
7582   for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
7583     checked_hash_insert (aarch64_sys_regs_ic_hsh,
7584 			 aarch64_sys_regs_ic[i].name,
7585 			 (void *) (aarch64_sys_regs_ic + i));
7586 
7587   for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
7588     checked_hash_insert (aarch64_sys_regs_dc_hsh,
7589 			 aarch64_sys_regs_dc[i].name,
7590 			 (void *) (aarch64_sys_regs_dc + i));
7591 
7592   for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
7593     checked_hash_insert (aarch64_sys_regs_at_hsh,
7594 			 aarch64_sys_regs_at[i].name,
7595 			 (void *) (aarch64_sys_regs_at + i));
7596 
7597   for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
7598     checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7599 			 aarch64_sys_regs_tlbi[i].name,
7600 			 (void *) (aarch64_sys_regs_tlbi + i));
7601 
7602   for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7603     checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7604 			 (void *) (reg_names + i));
7605 
7606   for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7607     checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7608 			 (void *) (nzcv_names + i));
7609 
7610   for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7611     {
7612       const char *name = aarch64_operand_modifiers[i].name;
7613       checked_hash_insert (aarch64_shift_hsh, name,
7614 			   (void *) (aarch64_operand_modifiers + i));
7615       /* Also hash the name in the upper case.  */
7616       checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7617 			   (void *) (aarch64_operand_modifiers + i));
7618     }
7619 
7620   for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7621     {
7622       unsigned int j;
7623       /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7624 	 the same condition code.  */
7625       for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7626 	{
7627 	  const char *name = aarch64_conds[i].names[j];
7628 	  if (name == NULL)
7629 	    break;
7630 	  checked_hash_insert (aarch64_cond_hsh, name,
7631 			       (void *) (aarch64_conds + i));
7632 	  /* Also hash the name in the upper case.  */
7633 	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7634 			       (void *) (aarch64_conds + i));
7635 	}
7636     }
7637 
7638   for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7639     {
7640       const char *name = aarch64_barrier_options[i].name;
7641       /* Skip xx00 - the unallocated values of the barrier option field.  */
7642       if ((i & 0x3) == 0)
7643 	continue;
7644       checked_hash_insert (aarch64_barrier_opt_hsh, name,
7645 			   (void *) (aarch64_barrier_options + i));
7646       /* Also hash the name in the upper case.  */
7647       checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7648 			   (void *) (aarch64_barrier_options + i));
7649     }
7650 
7651   for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7652     {
7653       const char* name = aarch64_prfops[i].name;
7654       /* Skip the unallocated hint encodings.  */
7655       if (name == NULL)
7656 	continue;
7657       checked_hash_insert (aarch64_pldop_hsh, name,
7658 			   (void *) (aarch64_prfops + i));
7659       /* Also hash the name in the upper case.  */
7660       checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7661 			   (void *) (aarch64_prfops + i));
7662     }
7663 
7664   for (i = 0; aarch64_hint_options[i].name != NULL; i++)
7665     {
7666       const char* name = aarch64_hint_options[i].name;
7667 
7668       checked_hash_insert (aarch64_hint_opt_hsh, name,
7669 			   (void *) (aarch64_hint_options + i));
7670       /* Also hash the name in the upper case.  */
7671       checked_hash_insert (aarch64_hint_opt_hsh, get_upper_str (name),
7672 			   (void *) (aarch64_hint_options + i));
7673     }
7674 
7675   /* Set the cpu variant based on the command-line options.  */
7676   if (!mcpu_cpu_opt)
7677     mcpu_cpu_opt = march_cpu_opt;
7678 
7679   if (!mcpu_cpu_opt)
7680     mcpu_cpu_opt = &cpu_default;
7681 
7682   cpu_variant = *mcpu_cpu_opt;
7683 
7684   /* Record the CPU type.  */
7685   mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7686 
7687   bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7688 }
7689 
7690 /* Command line processing.  */
7691 
7692 const char *md_shortopts = "m:";
7693 
7694 #ifdef AARCH64_BI_ENDIAN
7695 #define OPTION_EB (OPTION_MD_BASE + 0)
7696 #define OPTION_EL (OPTION_MD_BASE + 1)
7697 #else
7698 #if TARGET_BYTES_BIG_ENDIAN
7699 #define OPTION_EB (OPTION_MD_BASE + 0)
7700 #else
7701 #define OPTION_EL (OPTION_MD_BASE + 1)
7702 #endif
7703 #endif
7704 
7705 struct option md_longopts[] = {
7706 #ifdef OPTION_EB
7707   {"EB", no_argument, NULL, OPTION_EB},
7708 #endif
7709 #ifdef OPTION_EL
7710   {"EL", no_argument, NULL, OPTION_EL},
7711 #endif
7712   {NULL, no_argument, NULL, 0}
7713 };
7714 
7715 size_t md_longopts_size = sizeof (md_longopts);
7716 
7717 struct aarch64_option_table
7718 {
7719   const char *option;			/* Option name to match.  */
7720   const char *help;			/* Help information.  */
7721   int *var;			/* Variable to change.  */
7722   int value;			/* What to change it to.  */
7723   char *deprecated;		/* If non-null, print this message.  */
7724 };
7725 
7726 static struct aarch64_option_table aarch64_opts[] = {
7727   {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7728   {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7729    NULL},
7730 #ifdef DEBUG_AARCH64
7731   {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7732 #endif /* DEBUG_AARCH64 */
7733   {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7734    NULL},
7735   {"mno-verbose-error", N_("do not output verbose error messages"),
7736    &verbose_error_p, 0, NULL},
7737   {NULL, NULL, NULL, 0, NULL}
7738 };
7739 
7740 struct aarch64_cpu_option_table
7741 {
7742   const char *name;
7743   const aarch64_feature_set value;
7744   /* The canonical name of the CPU, or NULL to use NAME converted to upper
7745      case.  */
7746   const char *canonical_name;
7747 };
7748 
7749 /* This list should, at a minimum, contain all the cpu names
7750    recognized by GCC.  */
7751 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7752   {"all", AARCH64_ANY, NULL},
7753   {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
7754 				  AARCH64_FEATURE_CRC), "Cortex-A35"},
7755   {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
7756 				  AARCH64_FEATURE_CRC), "Cortex-A53"},
7757   {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
7758 				  AARCH64_FEATURE_CRC), "Cortex-A57"},
7759   {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
7760 				  AARCH64_FEATURE_CRC), "Cortex-A72"},
7761   {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
7762 				  AARCH64_FEATURE_CRC), "Cortex-A73"},
7763   {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
7764 				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7765 				"Samsung Exynos M1"},
7766   {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
7767 			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7768    "Qualcomm QDF24XX"},
7769   {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
7770 				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7771    "Cavium ThunderX"},
7772   {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
7773 			      AARCH64_FEATURE_CRYPTO),
7774   "Broadcom Vulcan"},
7775   /* The 'xgene-1' spelling is an older name for 'xgene1'; it was used
7776      in earlier releases and is superseded by 'xgene1' in all
7777      tools.  */
7778   {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7779   {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7780   {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
7781 			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
7782   {"generic", AARCH64_ARCH_V8, NULL},
7783 
7784   {NULL, AARCH64_ARCH_NONE, NULL}
7785 };
7786 
7787 struct aarch64_arch_option_table
7788 {
7789   const char *name;
7790   const aarch64_feature_set value;
7791 };
7792 
7793 /* This list should, at a minimum, contain all the architecture names
7794    recognized by GCC.  */
7795 static const struct aarch64_arch_option_table aarch64_archs[] = {
7796   {"all", AARCH64_ANY},
7797   {"armv8-a", AARCH64_ARCH_V8},
7798   {"armv8.1-a", AARCH64_ARCH_V8_1},
7799   {"armv8.2-a", AARCH64_ARCH_V8_2},
7800   {NULL, AARCH64_ARCH_NONE}
7801 };
7802 
7803 /* ISA extensions.  */
7804 struct aarch64_option_cpu_value_table
7805 {
7806   const char *name;
7807   const aarch64_feature_set value;
7808 };
7809 
7810 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7811   {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7812   {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7813   {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7814   {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0)},
7815   {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7816   {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0)},
7817   {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0)},
7818   {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0)},
7819   {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD
7820 					 | AARCH64_FEATURE_RDMA, 0)},
7821   {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16
7822 					 | AARCH64_FEATURE_FP, 0)},
7823   {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0)},
7824   {NULL,		AARCH64_ARCH_NONE}
7825 };
7826 
7827 struct aarch64_long_option_table
7828 {
7829   const char *option;			/* Substring to match.  */
7830   const char *help;			/* Help information.  */
7831   int (*func) (const char *subopt);	/* Function to decode sub-option.  */
7832   char *deprecated;		/* If non-null, print this message.  */
7833 };
7834 
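/* Parse an architectural extension string such as "+crc+nofp".  Extensions
   to add must appear before extensions to remove (the "no" forms), so e.g.
   "+nofp+crc" is rejected below.  *OPT_P is replaced with a freshly
   allocated feature set that has the requested changes applied.  */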
7835 static int
7836 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
7837 			bfd_boolean ext_only)
7838 {
7839   /* We insist on extensions being added before being removed.  We achieve
7840      this by using the ADDING_VALUE variable to indicate whether we are
7841      adding an extension (1) or removing it (0) and only allowing it to
7842      change in the order -1 -> 1 -> 0.  */
7843   int adding_value = -1;
7844   aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
7845 
7846   /* Copy the feature set, so that we can modify it.  */
7847   *ext_set = **opt_p;
7848   *opt_p = ext_set;
7849 
7850   while (str != NULL && *str != 0)
7851     {
7852       const struct aarch64_option_cpu_value_table *opt;
7853       const char *ext = NULL;
7854       int optlen;
7855 
7856       if (!ext_only)
7857 	{
7858 	  if (*str != '+')
7859 	    {
7860 	      as_bad (_("invalid architectural extension"));
7861 	      return 0;
7862 	    }
7863 
7864 	  ext = strchr (++str, '+');
7865 	}
7866 
7867       if (ext != NULL)
7868 	optlen = ext - str;
7869       else
7870 	optlen = strlen (str);
7871 
7872       if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7873 	{
7874 	  if (adding_value != 0)
7875 	    adding_value = 0;
7876 	  optlen -= 2;
7877 	  str += 2;
7878 	}
7879       else if (optlen > 0)
7880 	{
7881 	  if (adding_value == -1)
7882 	    adding_value = 1;
7883 	  else if (adding_value != 1)
7884 	    {
7885 	      as_bad (_("must specify extensions to add before specifying "
7886 			"those to remove"));
7887 	      return FALSE;
7888 	    }
7889 	}
7890 
7891       if (optlen == 0)
7892 	{
7893 	  as_bad (_("missing architectural extension"));
7894 	  return 0;
7895 	}
7896 
7897       gas_assert (adding_value != -1);
7898 
7899       for (opt = aarch64_features; opt->name != NULL; opt++)
7900 	if (strncmp (opt->name, str, optlen) == 0)
7901 	  {
7902 	    /* Add or remove the extension.  */
7903 	    if (adding_value)
7904 	      AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7905 	    else
7906 	      AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7907 	    break;
7908 	  }
7909 
7910       if (opt->name == NULL)
7911 	{
7912 	  as_bad (_("unknown architectural extension `%s'"), str);
7913 	  return 0;
7914 	}
7915 
7916       str = ext;
7917     };
7918 
7919   return 1;
7920 }
7921 
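/* Handle the value of a -mcpu= option, e.g. "cortex-a57+crypto": the part
   before any '+' selects an entry in aarch64_cpus, and the remainder, if
   present, is handed to aarch64_parse_features ().  */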
7922 static int
7923 aarch64_parse_cpu (const char *str)
7924 {
7925   const struct aarch64_cpu_option_table *opt;
7926   const char *ext = strchr (str, '+');
7927   size_t optlen;
7928 
7929   if (ext != NULL)
7930     optlen = ext - str;
7931   else
7932     optlen = strlen (str);
7933 
7934   if (optlen == 0)
7935     {
7936       as_bad (_("missing cpu name `%s'"), str);
7937       return 0;
7938     }
7939 
7940   for (opt = aarch64_cpus; opt->name != NULL; opt++)
7941     if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7942       {
7943 	mcpu_cpu_opt = &opt->value;
7944 	if (ext != NULL)
7945 	  return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
7946 
7947 	return 1;
7948       }
7949 
7950   as_bad (_("unknown cpu `%s'"), str);
7951   return 0;
7952 }
7953 
7954 static int
7955 aarch64_parse_arch (const char *str)
7956 {
7957   const struct aarch64_arch_option_table *opt;
7958   const char *ext = strchr (str, '+');
7959   size_t optlen;
7960 
7961   if (ext != NULL)
7962     optlen = ext - str;
7963   else
7964     optlen = strlen (str);
7965 
7966   if (optlen == 0)
7967     {
7968       as_bad (_("missing architecture name `%s'"), str);
7969       return 0;
7970     }
7971 
7972   for (opt = aarch64_archs; opt->name != NULL; opt++)
7973     if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7974       {
7975 	march_cpu_opt = &opt->value;
7976 	if (ext != NULL)
7977 	  return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
7978 
7979 	return 1;
7980       }
7981 
7982   as_bad (_("unknown architecture `%s'\n"), str);
7983   return 0;
7984 }
7985 
7986 /* ABIs.  */
7987 struct aarch64_option_abi_value_table
7988 {
7989   const char *name;
7990   enum aarch64_abi_type value;
7991 };
7992 
7993 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7994   {"ilp32",		AARCH64_ABI_ILP32},
7995   {"lp64",		AARCH64_ABI_LP64},
7996 };
7997 
7998 static int
7999 aarch64_parse_abi (const char *str)
8000 {
8001   unsigned int i;
8002 
8003   if (str[0] == '\0')
8004     {
8005       as_bad (_("missing abi name `%s'"), str);
8006       return 0;
8007     }
8008 
8009   for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
8010     if (strcmp (str, aarch64_abis[i].name) == 0)
8011       {
8012 	aarch64_abi = aarch64_abis[i].value;
8013 	return 1;
8014       }
8015 
8016   as_bad (_("unknown abi `%s'\n"), str);
8017   return 0;
8018 }
8019 
8020 static struct aarch64_long_option_table aarch64_long_opts[] = {
8021 #ifdef OBJ_ELF
8022   {"mabi=", N_("<abi name>\t  specify ABI <abi name>"),
8023    aarch64_parse_abi, NULL},
8024 #endif /* OBJ_ELF */
8025   {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
8026    aarch64_parse_cpu, NULL},
8027   {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
8028    aarch64_parse_arch, NULL},
8029   {NULL, NULL, 0, NULL}
8030 };
8031 
8032 int
8033 md_parse_option (int c, const char *arg)
8034 {
8035   struct aarch64_option_table *opt;
8036   struct aarch64_long_option_table *lopt;
8037 
8038   switch (c)
8039     {
8040 #ifdef OPTION_EB
8041     case OPTION_EB:
8042       target_big_endian = 1;
8043       break;
8044 #endif
8045 
8046 #ifdef OPTION_EL
8047     case OPTION_EL:
8048       target_big_endian = 0;
8049       break;
8050 #endif
8051 
8052     case 'a':
8053       /* Listing option.  Just ignore these, we don't support additional
8054          ones.  */
8055       return 0;
8056 
8057     default:
8058       for (opt = aarch64_opts; opt->option != NULL; opt++)
8059 	{
8060 	  if (c == opt->option[0]
8061 	      && ((arg == NULL && opt->option[1] == 0)
8062 		  || streq (arg, opt->option + 1)))
8063 	    {
8064 	      /* If the option is deprecated, tell the user.  */
8065 	      if (opt->deprecated != NULL)
8066 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
8067 			   arg ? arg : "", _(opt->deprecated));
8068 
8069 	      if (opt->var != NULL)
8070 		*opt->var = opt->value;
8071 
8072 	      return 1;
8073 	    }
8074 	}
8075 
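      /* For example, "-mcpu=cortex-a53" arrives here with C == 'm' and
	 ARG == "cpu=cortex-a53"; it matches the "mcpu=" entry below and the
	 text after the '=' is passed to the sub-option parser.  */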
8076       for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8077 	{
8078 	  /* These options are expected to have an argument.  */
8079 	  if (c == lopt->option[0]
8080 	      && arg != NULL
8081 	      && strncmp (arg, lopt->option + 1,
8082 			  strlen (lopt->option + 1)) == 0)
8083 	    {
8084 	      /* If the option is deprecated, tell the user.  */
8085 	      if (lopt->deprecated != NULL)
8086 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
8087 			   _(lopt->deprecated));
8088 
8089 	      /* Call the sub-option parser.  */
8090 	      return lopt->func (arg + strlen (lopt->option) - 1);
8091 	    }
8092 	}
8093 
8094       return 0;
8095     }
8096 
8097   return 1;
8098 }
8099 
8100 void
8101 md_show_usage (FILE * fp)
8102 {
8103   struct aarch64_option_table *opt;
8104   struct aarch64_long_option_table *lopt;
8105 
8106   fprintf (fp, _(" AArch64-specific assembler options:\n"));
8107 
8108   for (opt = aarch64_opts; opt->option != NULL; opt++)
8109     if (opt->help != NULL)
8110       fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));
8111 
8112   for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8113     if (lopt->help != NULL)
8114       fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));
8115 
8116 #ifdef OPTION_EB
8117   fprintf (fp, _("\
8118   -EB                     assemble code for a big-endian cpu\n"));
8119 #endif
8120 
8121 #ifdef OPTION_EL
8122   fprintf (fp, _("\
8123   -EL                     assemble code for a little-endian cpu\n"));
8124 #endif
8125 }
8126 
8127 /* Parse a .cpu directive.  */
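/* For example, ".cpu cortex-a72+crypto" selects the Cortex-A72 feature set
   and additionally enables the crypto extension for the code that
   follows.  */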
8128 
8129 static void
8130 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
8131 {
8132   const struct aarch64_cpu_option_table *opt;
8133   char saved_char;
8134   char *name;
8135   char *ext;
8136   size_t optlen;
8137 
8138   name = input_line_pointer;
8139   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8140     input_line_pointer++;
8141   saved_char = *input_line_pointer;
8142   *input_line_pointer = 0;
8143 
8144   ext = strchr (name, '+');
8145 
8146   if (ext != NULL)
8147     optlen = ext - name;
8148   else
8149     optlen = strlen (name);
8150 
8151   /* Skip the first "all" entry.  */
8152   for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
8153     if (strlen (opt->name) == optlen
8154 	&& strncmp (name, opt->name, optlen) == 0)
8155       {
8156 	mcpu_cpu_opt = &opt->value;
8157 	if (ext != NULL)
8158 	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8159 	    return;
8160 
8161 	cpu_variant = *mcpu_cpu_opt;
8162 
8163 	*input_line_pointer = saved_char;
8164 	demand_empty_rest_of_line ();
8165 	return;
8166       }
8167   as_bad (_("unknown cpu `%s'"), name);
8168   *input_line_pointer = saved_char;
8169   ignore_rest_of_line ();
8170 }
8171 
8172 
8173 /* Parse a .arch directive.  */
8174 
8175 static void
8176 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
8177 {
8178   const struct aarch64_arch_option_table *opt;
8179   char saved_char;
8180   char *name;
8181   char *ext;
8182   size_t optlen;
8183 
8184   name = input_line_pointer;
8185   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8186     input_line_pointer++;
8187   saved_char = *input_line_pointer;
8188   *input_line_pointer = 0;
8189 
8190   ext = strchr (name, '+');
8191 
8192   if (ext != NULL)
8193     optlen = ext - name;
8194   else
8195     optlen = strlen (name);
8196 
8197   /* Skip the first "all" entry.  */
8198   for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
8199     if (strlen (opt->name) == optlen
8200 	&& strncmp (name, opt->name, optlen) == 0)
8201       {
8202 	mcpu_cpu_opt = &opt->value;
8203 	if (ext != NULL)
8204 	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8205 	    return;
8206 
8207 	cpu_variant = *mcpu_cpu_opt;
8208 
8209 	*input_line_pointer = saved_char;
8210 	demand_empty_rest_of_line ();
8211 	return;
8212       }
8213 
8214   as_bad (_("unknown architecture `%s'\n"), name);
8215   *input_line_pointer = saved_char;
8216   ignore_rest_of_line ();
8217 }
8218 
8219 /* Parse a .arch_extension directive.  */
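/* For example, ".arch_extension crc" enables, and ".arch_extension nocrc"
   disables, the CRC instructions from this point in the source onwards.  */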
8220 
8221 static void
8222 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
8223 {
8224   char saved_char;
8225   char *ext = input_line_pointer;
8226 
8227   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8228     input_line_pointer++;
8229   saved_char = *input_line_pointer;
8230   *input_line_pointer = 0;
8231 
8232   if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
8233     return;
8234 
8235   cpu_variant = *mcpu_cpu_opt;
8236 
8237   *input_line_pointer = saved_char;
8238   demand_empty_rest_of_line ();
8239 }
8240 
8241 /* Copy symbol information.  */
8242 
8243 void
8244 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
8245 {
8246   AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
8247 }
8248