1 /* aarch64-dis.c -- AArch64 disassembler.
2    Copyright (C) 2009-2014 Free Software Foundation, Inc.
3    Contributed by ARM Ltd.
4 
5    This file is part of the GNU opcodes library.
6 
7    This library is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3, or (at your option)
10    any later version.
11 
12    It is distributed in the hope that it will be useful, but WITHOUT
13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15    License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with this program; see the file COPYING3. If not,
19    see <http://www.gnu.org/licenses/>.  */
20 
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "dis-asm.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28 
29 #define ERR_OK   0
30 #define ERR_UND -1
31 #define ERR_UNP -3
32 #define ERR_NYI -5
33 
34 #define INSNLEN 4
35 
36 /* Cached mapping symbol state.  */
37 enum map_type
38 {
39   MAP_INSN,
40   MAP_DATA
41 };
42 
43 static enum map_type last_type;
44 static int last_mapping_sym = -1;
45 static bfd_vma last_mapping_addr = 0;
46 
47 /* Other options */
48 static int no_aliases = 0;	/* If set, disassemble as the most general instruction.  */
49 
50 
51 static void
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
53 {
54 }
55 
56 static void
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
58 {
59   /* Try to match options that are simple flags.  */
60   if (CONST_STRNEQ (option, "no-aliases"))
61     {
62       no_aliases = 1;
63       return;
64     }
65 
66   if (CONST_STRNEQ (option, "aliases"))
67     {
68       no_aliases = 0;
69       return;
70     }
71 
72 #ifdef DEBUG_AARCH64
73   if (CONST_STRNEQ (option, "debug_dump"))
74     {
75       debug_dump = 1;
76       return;
77     }
78 #endif /* DEBUG_AARCH64 */
79 
80   /* Invalid option.  */
81   fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
82 }
83 
84 static void
85 parse_aarch64_dis_options (const char *options)
86 {
87   const char *option_end;
88 
89   if (options == NULL)
90     return;
91 
92   while (*options != '\0')
93     {
94       /* Skip empty options.  */
95       if (*options == ',')
96 	{
97 	  options++;
98 	  continue;
99 	}
100 
101       /* We know that *options is neither NUL nor a comma.  */
102       option_end = options + 1;
103       while (*option_end != ',' && *option_end != '\0')
104 	option_end++;
105 
106       parse_aarch64_dis_option (options, option_end - options);
107 
108       /* Go on to the next one.  If option_end points to a comma, it
109 	 will be skipped above.  */
110       options = option_end;
111     }
112 }
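
/* A minimal usage sketch (not compiled): how a caller such as the
   print_insn_aarch64 entry point might hand a comma-separated option
   string to the parser above.  The example_* function name and the
   option string literal are illustrative only.  */
#if 0
static void
example_set_dis_options (void)
{
  /* Empty items are skipped; after this call no_aliases == 1.  */
  parse_aarch64_dis_options (",no-aliases,");
}
#endif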
113 
114 /* Functions doing the instruction disassembling.  */
115 
116 /* The unnamed arguments consist of the number of fields and information about
117    these fields where the VALUE will be extracted from CODE and returned.
118    MASK can be zero or the base mask of the opcode.
119 
120    N.B. the fields are required to be in such an order that the most significant
121    field for VALUE comes first, e.g. the <index> in
122     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
123    is encoded in H:L:M in some cases; in such cases the fields should be
124    passed in the order of H, L, M.  */
125 
126 static inline aarch64_insn
127 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
128 {
129   uint32_t num;
130   const aarch64_field *field;
131   enum aarch64_field_kind kind;
132   va_list va;
133 
134   va_start (va, mask);
135   num = va_arg (va, uint32_t);
136   assert (num <= 5);
137   aarch64_insn value = 0x0;
138   while (num--)
139     {
140       kind = va_arg (va, enum aarch64_field_kind);
141       field = &fields[kind];
142       value <<= field->width;
143       value |= extract_field (kind, code, mask);
144     }
145   return value;
146 }
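
/* Illustrative sketch (not compiled) of the most-significant-field-first
   rule documented above: the lane index encoded in H:L:M is obtained by
   listing the fields from H down to M.  The example_* helper is
   hypothetical.  */
#if 0
static aarch64_insn
example_extract_hlm_index (aarch64_insn code)
{
  /* Equivalent to (H << 2) | (L << 1) | M.  */
  return extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);
}
#endif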
147 
148 /* Sign-extend bit I of VALUE.  */
149 static inline int32_t
150 sign_extend (aarch64_insn value, unsigned i)
151 {
152   uint32_t ret = value;
153 
154   assert (i < 32);
155   if ((value >> i) & 0x1)
156     {
157       uint32_t val = (uint32_t)(-1) << i;
158       ret = ret | val;
159     }
160   return (int32_t) ret;
161 }
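
/* Worked sketch (not compiled): sign_extend treats bit I as the sign bit,
   so a 9-bit load/store offset (imm9) is widened with I == 8; for example
   0x1f0 becomes -16.  The example_* helper is hypothetical.  */
#if 0
static int32_t
example_sign_extend_imm9 (aarch64_insn imm9)
{
  return sign_extend (imm9, 8);
}
#endif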
162 
163 /* N.B. the following inline helper functions create a dependency on the
164    order of operand qualifier enumerators.  */
165 
166 /* Given VALUE, return qualifier for a general purpose register.  */
167 static inline enum aarch64_opnd_qualifier
168 get_greg_qualifier_from_value (aarch64_insn value)
169 {
170   enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
171   assert (value <= 0x1
172 	  && aarch64_get_qualifier_standard_value (qualifier) == value);
173   return qualifier;
174 }
175 
176 /* Given VALUE, return qualifier for a vector register.  */
177 static inline enum aarch64_opnd_qualifier
178 get_vreg_qualifier_from_value (aarch64_insn value)
179 {
180   enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
181 
182   assert (value <= 0x8
183 	  && aarch64_get_qualifier_standard_value (qualifier) == value);
184   return qualifier;
185 }
186 
187 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register.  */
188 static inline enum aarch64_opnd_qualifier
189 get_sreg_qualifier_from_value (aarch64_insn value)
190 {
191   enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
192 
193   assert (value <= 0x4
194 	  && aarch64_get_qualifier_standard_value (qualifier) == value);
195   return qualifier;
196 }
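
/* Illustrative sketch (not compiled): because of the enumerator ordering
   relied on above, the 'sf' bit maps directly onto the W/X qualifiers.
   The example_* helper is hypothetical.  */
#if 0
static enum aarch64_opnd_qualifier
example_greg_qualifier_from_sf (aarch64_insn code)
{
  /* sf == 0 -> AARCH64_OPND_QLF_W; sf == 1 -> AARCH64_OPND_QLF_X.  */
  return get_greg_qualifier_from_value (extract_field (FLD_sf, code, 0));
}
#endif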
197 
198 /* The instruction in *INST is probably half way through the decoding and
199    our caller wants to know the expected qualifier for operand I.  Return
200    such a qualifier if we can establish it; otherwise return
201    AARCH64_OPND_QLF_NIL.  */
202 
203 static aarch64_opnd_qualifier_t
204 get_expected_qualifier (const aarch64_inst *inst, int i)
205 {
206   aarch64_opnd_qualifier_seq_t qualifiers;
207   /* Should not be called if the qualifier is known.  */
208   assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
209   if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
210 			       i, qualifiers))
211     return qualifiers[i];
212   else
213     return AARCH64_OPND_QLF_NIL;
214 }
215 
216 /* Operand extractors.  */
217 
218 int
219 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
220 		   const aarch64_insn code,
221 		   const aarch64_inst *inst ATTRIBUTE_UNUSED)
222 {
223   info->reg.regno = extract_field (self->fields[0], code, 0);
224   return 1;
225 }
226 
227 int
228 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
229 		   const aarch64_insn code ATTRIBUTE_UNUSED,
230 		   const aarch64_inst *inst ATTRIBUTE_UNUSED)
231 {
232   assert (info->idx == 1
233 	  || info->idx == 3);
234   info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
235   return 1;
236 }
237 
238 /* e.g. IC <ic_op>{, <Xt>}.  */
239 int
240 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
241 			  const aarch64_insn code,
242 			  const aarch64_inst *inst ATTRIBUTE_UNUSED)
243 {
244   info->reg.regno = extract_field (self->fields[0], code, 0);
245   assert (info->idx == 1
246 	  && (aarch64_get_operand_class (inst->operands[0].type)
247 	      == AARCH64_OPND_CLASS_SYSTEM));
248   /* This will make the constraint checking happy and more importantly will
249      help the disassembler determine whether this operand is optional or
250      not.  */
251   info->present = inst->operands[0].sysins_op->has_xt;
252 
253   return 1;
254 }
255 
256 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
257 int
258 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
259 		     const aarch64_insn code,
260 		     const aarch64_inst *inst ATTRIBUTE_UNUSED)
261 {
262   /* regno */
263   info->reglane.regno = extract_field (self->fields[0], code,
264 				       inst->opcode->mask);
265 
266   /* Index and/or type.  */
267   if (inst->opcode->iclass == asisdone
268     || inst->opcode->iclass == asimdins)
269     {
270       if (info->type == AARCH64_OPND_En
271 	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
272 	{
273 	  unsigned shift;
274 	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
275 	  assert (info->idx == 1);	/* Vn */
276 	  aarch64_insn value = extract_field (FLD_imm4, code, 0);
277 	  /* Depend on AARCH64_OPND_Ed to determine the qualifier.  */
278 	  info->qualifier = get_expected_qualifier (inst, info->idx);
279 	  shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
280 	  info->reglane.index = value >> shift;
281 	}
282       else
283 	{
284 	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
285 	     imm5<3:0>	<V>
286 	     0000	RESERVED
287 	     xxx1	B
288 	     xx10	H
289 	     x100	S
290 	     1000	D  */
291 	  int pos = -1;
292 	  aarch64_insn value = extract_field (FLD_imm5, code, 0);
293 	  while (++pos <= 3 && (value & 0x1) == 0)
294 	    value >>= 1;
295 	  if (pos > 3)
296 	    return 0;
297 	  info->qualifier = get_sreg_qualifier_from_value (pos);
298 	  info->reglane.index = (unsigned) (value >> 1);
299 	}
300     }
301   else
302     {
303       /* Index only, for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>];
304          the element type is determined from the other operand(s).  */
305 
306       /* Need information in other operand(s) to help decoding.  */
307       info->qualifier = get_expected_qualifier (inst, info->idx);
308       switch (info->qualifier)
309 	{
310 	case AARCH64_OPND_QLF_S_H:
311 	  /* h:l:m */
312 	  info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
313 						FLD_M);
314 	  info->reglane.regno &= 0xf;
315 	  break;
316 	case AARCH64_OPND_QLF_S_S:
317 	  /* h:l */
318 	  info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
319 	  break;
320 	case AARCH64_OPND_QLF_S_D:
321 	  /* H */
322 	  info->reglane.index = extract_field (FLD_H, code, 0);
323 	  break;
324 	default:
325 	  return 0;
326 	}
327     }
328 
329   return 1;
330 }
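
/* Worked sketch (not compiled) of the imm5 decoding above for
   DUP <V><d>, <Vn>.<T>[<index>]: imm5 == 0b01100 has its lowest set bit
   at position 2, so the element is an S register and the index is 1.
   The example_* helper is hypothetical.  */
#if 0
static void
example_decode_dup_imm5 (void)
{
  aarch64_insn imm5 = 0x0c;	/* x100 -> S register.  */
  int pos = -1;
  while (++pos <= 3 && (imm5 & 0x1) == 0)
    imm5 >>= 1;
  /* pos == 2 -> AARCH64_OPND_QLF_S_S; index == imm5 >> 1 == 1.  */
  printf ("element size log2 %d, index %d\n", pos, (int) (imm5 >> 1));
}
#endif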
331 
332 int
333 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
334 		     const aarch64_insn code,
335 		     const aarch64_inst *inst ATTRIBUTE_UNUSED)
336 {
337   /* R */
338   info->reglist.first_regno = extract_field (self->fields[0], code, 0);
339   /* len */
340   info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
341   return 1;
342 }
343 
344 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions.  */
345 int
346 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
347 			  aarch64_opnd_info *info, const aarch64_insn code,
348 			  const aarch64_inst *inst)
349 {
350   aarch64_insn value;
351   /* Number of elements in each structure to be loaded/stored.  */
352   unsigned expected_num = get_opcode_dependent_value (inst->opcode);
353 
354   struct
355     {
356       unsigned is_reserved;
357       unsigned num_regs;
358       unsigned num_elements;
359     } data [] =
360   {   {0, 4, 4},
361       {1, 4, 4},
362       {0, 4, 1},
363       {0, 4, 2},
364       {0, 3, 3},
365       {1, 3, 3},
366       {0, 3, 1},
367       {0, 1, 1},
368       {0, 2, 2},
369       {1, 2, 2},
370       {0, 2, 1},
371   };
372 
373   /* Rt */
374   info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
375   /* opcode */
376   value = extract_field (FLD_opcode, code, 0);
377   if (expected_num != data[value].num_elements || data[value].is_reserved)
378     return 0;
379   info->reglist.num_regs = data[value].num_regs;
380 
381   return 1;
382 }
383 
384 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
385    lanes instructions.  */
386 int
387 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
388 			    aarch64_opnd_info *info, const aarch64_insn code,
389 			    const aarch64_inst *inst)
390 {
391   aarch64_insn value;
392 
393   /* Rt */
394   info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
395   /* S */
396   value = extract_field (FLD_S, code, 0);
397 
398   /* Number of registers is equal to the number of elements in
399      each structure to be loaded/stored.  */
400   info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
401   assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
402 
403   /* Except when it is LD1R.  */
404   if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
405     info->reglist.num_regs = 2;
406 
407   return 1;
408 }
409 
410 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
411    load/store single element instructions.  */
412 int
413 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
414 			   aarch64_opnd_info *info, const aarch64_insn code,
415 			   const aarch64_inst *inst ATTRIBUTE_UNUSED)
416 {
417   aarch64_field field = {0, 0};
418   aarch64_insn QSsize;		/* fields Q:S:size.  */
419   aarch64_insn opcodeh2;	/* opcode<2:1> */
420 
421   /* Rt */
422   info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
423 
424   /* Decode the index, opcode<2:1> and size.  */
425   gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
426   opcodeh2 = extract_field_2 (&field, code, 0);
427   QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
428   switch (opcodeh2)
429     {
430     case 0x0:
431       info->qualifier = AARCH64_OPND_QLF_S_B;
432       /* Index encoded in "Q:S:size".  */
433       info->reglist.index = QSsize;
434       break;
435     case 0x1:
436       if (QSsize & 0x1)
437 	/* UND.  */
438 	return 0;
439       info->qualifier = AARCH64_OPND_QLF_S_H;
440       /* Index encoded in "Q:S:size<1>".  */
441       info->reglist.index = QSsize >> 1;
442       break;
443     case 0x2:
444       if ((QSsize >> 1) & 0x1)
445 	/* UND.  */
446 	return 0;
447       if ((QSsize & 0x1) == 0)
448 	{
449 	  info->qualifier = AARCH64_OPND_QLF_S_S;
450 	  /* Index encoded in "Q:S".  */
451 	  info->reglist.index = QSsize >> 2;
452 	}
453       else
454 	{
455 	  if (extract_field (FLD_S, code, 0))
456 	    /* UND */
457 	    return 0;
458 	  info->qualifier = AARCH64_OPND_QLF_S_D;
459 	  /* Index encoded in "Q".  */
460 	  info->reglist.index = QSsize >> 3;
461 	}
462       break;
463     default:
464       return 0;
465     }
466 
467   info->reglist.has_index = 1;
468   info->reglist.num_regs = 0;
469   /* Number of registers is equal to the number of elements in
470      each structure to be loaded/stored.  */
471   info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
472   assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
473 
474   return 1;
475 }
476 
477 /* Decode fields immh:immb and/or Q for e.g.
478    SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
479    or SSHR <V><d>, <V><n>, #<shift>.  */
480 
481 int
482 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
483 			       aarch64_opnd_info *info, const aarch64_insn code,
484 			       const aarch64_inst *inst)
485 {
486   int pos;
487   aarch64_insn Q, imm, immh;
488   enum aarch64_insn_class iclass = inst->opcode->iclass;
489 
490   immh = extract_field (FLD_immh, code, 0);
491   if (immh == 0)
492     return 0;
493   imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
494   pos = 4;
495   /* Get highest set bit in immh.  */
496   while (--pos >= 0 && (immh & 0x8) == 0)
497     immh <<= 1;
498 
499   assert ((iclass == asimdshf || iclass == asisdshf)
500 	  && (info->type == AARCH64_OPND_IMM_VLSR
501 	      || info->type == AARCH64_OPND_IMM_VLSL));
502 
503   if (iclass == asimdshf)
504     {
505       Q = extract_field (FLD_Q, code, 0);
506       /* immh	Q	<T>
507 	 0000	x	SEE AdvSIMD modified immediate
508 	 0001	0	8B
509 	 0001	1	16B
510 	 001x	0	4H
511 	 001x	1	8H
512 	 01xx	0	2S
513 	 01xx	1	4S
514 	 1xxx	0	RESERVED
515 	 1xxx	1	2D  */
516       info->qualifier =
517 	get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
518     }
519   else
520     info->qualifier = get_sreg_qualifier_from_value (pos);
521 
522   if (info->type == AARCH64_OPND_IMM_VLSR)
523     /* immh	<shift>
524        0000	SEE AdvSIMD modified immediate
525        0001	(16-UInt(immh:immb))
526        001x	(32-UInt(immh:immb))
527        01xx	(64-UInt(immh:immb))
528        1xxx	(128-UInt(immh:immb))  */
529     info->imm.value = (16 << pos) - imm;
530   else
531     /* immh:immb
532        immh	<shift>
533        0000	SEE AdvSIMD modified immediate
534        0001	(UInt(immh:immb)-8)
535        001x	(UInt(immh:immb)-16)
536        01xx	(UInt(immh:immb)-32)
537        1xxx	(UInt(immh:immb)-64)  */
538     info->imm.value = imm - (8 << pos);
539 
540   return 1;
541 }
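
/* Worked sketch (not compiled) of the right-shift decoding above: for
   SSHR <Vd>.8B, <Vn>.8B, #3 the encoding has immh:immb == 0b0001101 (13)
   and the highest set bit of immh is bit 0, so the decoded shift is
   (16 << 0) - 13 == 3.  The example_* helper is hypothetical.  */
#if 0
static int64_t
example_sshr_decoded_shift (void)
{
  aarch64_insn immh_immb = 0x0d;	/* immh == 0001, immb == 101.  */
  int pos = 0;				/* Highest set bit of immh.  */
  return (16 << pos) - (int64_t) immh_immb;
}
#endif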
542 
543 /* Decode the shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>.  */
544 int
545 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
546 		      aarch64_opnd_info *info, const aarch64_insn code,
547 		      const aarch64_inst *inst ATTRIBUTE_UNUSED)
548 {
549   int64_t imm;
550   aarch64_insn val;
551   val = extract_field (FLD_size, code, 0);
552   switch (val)
553     {
554     case 0: imm = 8; break;
555     case 1: imm = 16; break;
556     case 2: imm = 32; break;
557     default: return 0;
558     }
559   info->imm.value = imm;
560   return 1;
561 }
562 
563 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
564    The value in the field(s) will be extracted as an unsigned immediate value.  */
565 int
566 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
567 		 const aarch64_insn code,
568 		 const aarch64_inst *inst ATTRIBUTE_UNUSED)
569 {
570   int64_t imm;
571   /* Maximum of two fields to extract.  */
572   assert (self->fields[2] == FLD_NIL);
573 
574   if (self->fields[1] == FLD_NIL)
575     imm = extract_field (self->fields[0], code, 0);
576   else
577     /* e.g. TBZ b5:b40.  */
578     imm = extract_fields (code, 0, 2, self->fields[0], self->fields[1]);
579 
580   if (info->type == AARCH64_OPND_FPIMM)
581     info->imm.is_fp = 1;
582 
583   if (operand_need_sign_extension (self))
584     imm = sign_extend (imm, get_operand_fields_width (self) - 1);
585 
586   if (operand_need_shift_by_two (self))
587     imm <<= 2;
588 
589   if (info->type == AARCH64_OPND_ADDR_ADRP)
590     imm <<= 12;
591 
592   info->imm.value = imm;
593   return 1;
594 }
595 
596 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}.  */
597 int
598 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
599 		      const aarch64_insn code,
600 		      const aarch64_inst *inst ATTRIBUTE_UNUSED)
601 {
602   aarch64_ext_imm (self, info, code, inst);
603   info->shifter.kind = AARCH64_MOD_LSL;
604   info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
605   return 1;
606 }
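
/* Illustrative sketch (not compiled): the MOVZ/MOVK/MOVN shift amount is
   simply the 2-bit 'hw' field scaled by 16, i.e. 0, 16, 32 or 48.  The
   example_* helper is hypothetical.  */
#if 0
static unsigned
example_movz_lsl_amount (aarch64_insn code)
{
  return (unsigned) extract_field (FLD_hw, code, 0) << 4;
}
#endif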
607 
608 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
609      MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.  */
610 int
611 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
612 				  aarch64_opnd_info *info,
613 				  const aarch64_insn code,
614 				  const aarch64_inst *inst ATTRIBUTE_UNUSED)
615 {
616   uint64_t imm;
617   enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
618   aarch64_field field = {0, 0};
619 
620   assert (info->idx == 1);
621 
622   if (info->type == AARCH64_OPND_SIMD_FPIMM)
623     info->imm.is_fp = 1;
624 
625   /* a:b:c:d:e:f:g:h */
626   imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
627   if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
628     {
629       /* Either MOVI <Dd>, #<imm>
630 	 or     MOVI <Vd>.2D, #<imm>.
631 	 <imm> is a 64-bit immediate
632 	 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
633 	 encoded in "a:b:c:d:e:f:g:h".	*/
634       int i;
635       unsigned abcdefgh = imm;
636       for (imm = 0ull, i = 0; i < 8; i++)
637 	if (((abcdefgh >> i) & 0x1) != 0)
638 	  imm |= 0xffull << (8 * i);
639     }
640   info->imm.value = imm;
641 
642   /* cmode */
643   info->qualifier = get_expected_qualifier (inst, info->idx);
644   switch (info->qualifier)
645     {
646     case AARCH64_OPND_QLF_NIL:
647       /* no shift */
648       info->shifter.kind = AARCH64_MOD_NONE;
649       return 1;
650     case AARCH64_OPND_QLF_LSL:
651       /* shift zeros */
652       info->shifter.kind = AARCH64_MOD_LSL;
653       switch (aarch64_get_qualifier_esize (opnd0_qualifier))
654 	{
655 	case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break;	/* per word */
656 	case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break;	/* per half */
657 	case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break;	/* per byte */
658 	default: assert (0); return 0;
659 	}
660       /* 00: 0; 01: 8; 10:16; 11:24.  */
661       info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
662       break;
663     case AARCH64_OPND_QLF_MSL:
664       /* shift ones */
665       info->shifter.kind = AARCH64_MOD_MSL;
666       gen_sub_field (FLD_cmode, 0, 1, &field);		/* per word */
667       info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
668       break;
669     default:
670       assert (0);
671       return 0;
672     }
673 
674   return 1;
675 }
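
/* Worked sketch (not compiled) of the 64-bit MOVI expansion above:
   abcdefgh == 0xa5 (0b10100101) expands to 0xff00ff0000ff00ff, each set
   bit turning into a byte of 0xff.  The example_* helper is
   hypothetical.  */
#if 0
static uint64_t
example_expand_abcdefgh (unsigned abcdefgh)
{
  uint64_t imm = 0;
  int i;
  for (i = 0; i < 8; i++)
    if ((abcdefgh >> i) & 0x1)
      imm |= 0xffull << (8 * i);
  return imm;
}
#endif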
676 
677 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>.  */
678 int
679 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
680 		   aarch64_opnd_info *info, const aarch64_insn code,
681 		   const aarch64_inst *inst ATTRIBUTE_UNUSED)
682 {
683   info->imm.value = 64 - extract_field (FLD_scale, code, 0);
684   return 1;
685 }
686 
687 /* Decode arithmetic immediate for e.g.
688      SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}.  */
689 int
690 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
691 		  aarch64_opnd_info *info, const aarch64_insn code,
692 		  const aarch64_inst *inst ATTRIBUTE_UNUSED)
693 {
694   aarch64_insn value;
695 
696   info->shifter.kind = AARCH64_MOD_LSL;
697   /* shift */
698   value = extract_field (FLD_shift, code, 0);
699   if (value >= 2)
700     return 0;
701   info->shifter.amount = value ? 12 : 0;
702   /* imm12 (unsigned) */
703   info->imm.value = extract_field (FLD_imm12, code, 0);
704 
705   return 1;
706 }
707 
708 /* Decode logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>.  */
709 
710 int
711 aarch64_ext_limm (const aarch64_operand *self ATTRIBUTE_UNUSED,
712 		  aarch64_opnd_info *info, const aarch64_insn code,
713 		  const aarch64_inst *inst ATTRIBUTE_UNUSED)
714 {
715   uint64_t imm, mask;
716   uint32_t sf;
717   uint32_t N, R, S;
718   unsigned simd_size;
719   aarch64_insn value;
720 
721   value = extract_fields (code, 0, 3, FLD_N, FLD_immr, FLD_imms);
722   assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_W
723 	  || inst->operands[0].qualifier == AARCH64_OPND_QLF_X);
724   sf = aarch64_get_qualifier_esize (inst->operands[0].qualifier) != 4;
725 
726   /* value is N:immr:imms.  */
727   S = value & 0x3f;
728   R = (value >> 6) & 0x3f;
729   N = (value >> 12) & 0x1;
730 
731   if (sf == 0 && N == 1)
732     return 0;
733 
734   /* The immediate value is S+1 bits set to 1, left rotated by SIMDsize - R
735      (in other words, right rotated by R), then replicated.  */
736   if (N != 0)
737     {
738       simd_size = 64;
739       mask = 0xffffffffffffffffull;
740     }
741   else
742     {
743       switch (S)
744 	{
745 	case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32;           break;
746 	case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
747 	case 0x30 ... 0x37: /* 110xxx */ simd_size =  8; S &= 0x7; break;
748 	case 0x38 ... 0x3b: /* 1110xx */ simd_size =  4; S &= 0x3; break;
749 	case 0x3c ... 0x3d: /* 11110x */ simd_size =  2; S &= 0x1; break;
750 	default: return 0;
751 	}
752       mask = (1ull << simd_size) - 1;
753       /* Top bits are IGNORED.  */
754       R &= simd_size - 1;
755     }
756   /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected.  */
757   if (S == simd_size - 1)
758     return 0;
759   /* S+1 consecutive bits set to 1.  */
760   /* NOTE: S can't be 63 due to detection above.  */
761   imm = (1ull << (S + 1)) - 1;
762   /* Rotate to the left by simd_size - R.  */
763   if (R != 0)
764     imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
765   /* Replicate the value according to SIMD size.  */
766   switch (simd_size)
767     {
768     case  2: imm = (imm <<  2) | imm;	/* Fall through.  */
769     case  4: imm = (imm <<  4) | imm;	/* Fall through.  */
770     case  8: imm = (imm <<  8) | imm;	/* Fall through.  */
771     case 16: imm = (imm << 16) | imm;	/* Fall through.  */
772     case 32: imm = (imm << 32) | imm;	/* Fall through.  */
773     case 64: break;
774     default: assert (0); return 0;
775     }
776 
777   info->imm.value = sf ? imm : imm & 0xffffffff;
778 
779   return 1;
780 }
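
/* Worked sketch (not compiled) of the logical immediate decoding above:
   N == 0, immr == 0, imms == 0b000111 selects a 32-bit element with
   S == 7, i.e. eight consecutive ones (0xff), which is then replicated,
   giving 0x000000ff000000ff for an X-register operand.  The example_*
   helper is hypothetical.  */
#if 0
static uint64_t
example_limm_n0_r0_s7 (void)
{
  uint64_t imm = (1ull << (7 + 1)) - 1;	/* Eight consecutive ones.  */
  imm = (imm << 32) | imm;		/* Replicate the 32-bit element.  */
  return imm;				/* 0x000000ff000000ff  */
}
#endif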
781 
782 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
783    or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
784 int
785 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
786 		aarch64_opnd_info *info,
787 		const aarch64_insn code, const aarch64_inst *inst)
788 {
789   aarch64_insn value;
790 
791   /* Rt */
792   info->reg.regno = extract_field (FLD_Rt, code, 0);
793 
794   /* size */
795   value = extract_field (FLD_ldst_size, code, 0);
796   if (inst->opcode->iclass == ldstpair_indexed
797       || inst->opcode->iclass == ldstnapair_offs
798       || inst->opcode->iclass == ldstpair_off
799       || inst->opcode->iclass == loadlit)
800     {
801       enum aarch64_opnd_qualifier qualifier;
802       switch (value)
803 	{
804 	case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
805 	case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
806 	case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
807 	default: return 0;
808 	}
809       info->qualifier = qualifier;
810     }
811   else
812     {
813       /* opc1:size */
814       value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
815       if (value > 0x4)
816 	return 0;
817       info->qualifier = get_sreg_qualifier_from_value (value);
818     }
819 
820   return 1;
821 }
822 
823 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].  */
824 int
825 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
826 			 aarch64_opnd_info *info,
827 			 aarch64_insn code,
828 			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
829 {
830   /* Rn */
831   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
832   return 1;
833 }
834 
835 /* Decode the address operand for e.g.
836      STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
837 int
838 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
839 			 aarch64_opnd_info *info,
840 			 aarch64_insn code, const aarch64_inst *inst)
841 {
842   aarch64_insn S, value;
843 
844   /* Rn */
845   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
846   /* Rm */
847   info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
848   /* option */
849   value = extract_field (FLD_option, code, 0);
850   info->shifter.kind =
851     aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
852   /* Fix-up the shifter kind; although the table-driven approach is
853      efficient, it is slightly inflexible, thus needing this fix-up.  */
854   if (info->shifter.kind == AARCH64_MOD_UXTX)
855     info->shifter.kind = AARCH64_MOD_LSL;
856   /* S */
857   S = extract_field (FLD_S, code, 0);
858   if (S == 0)
859     {
860       info->shifter.amount = 0;
861       info->shifter.amount_present = 0;
862     }
863   else
864     {
865       int size;
866       /* Need information in other operand(s) to help achieve the decoding
867 	 from 'S' field.  */
868       info->qualifier = get_expected_qualifier (inst, info->idx);
869       /* Get the size of the data element that is accessed, which may be
870 	 different from the source register size, e.g. in strb/ldrb.  */
871       size = aarch64_get_qualifier_esize (info->qualifier);
872       info->shifter.amount = get_logsz (size);
873       info->shifter.amount_present = 1;
874     }
875 
876   return 1;
877 }
878 
879 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>.  */
880 int
881 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
882 		       aarch64_insn code, const aarch64_inst *inst)
883 {
884   aarch64_insn imm;
885   info->qualifier = get_expected_qualifier (inst, info->idx);
886 
887   /* Rn */
888   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
889   /* simm (imm9 or imm7)  */
890   imm = extract_field (self->fields[0], code, 0);
891   info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
892   if (self->fields[0] == FLD_imm7)
893     /* scaled immediate in ld/st pair instructions.  */
894     info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
895   /* qualifier */
896   if (inst->opcode->iclass == ldst_unscaled
897       || inst->opcode->iclass == ldstnapair_offs
898       || inst->opcode->iclass == ldstpair_off
899       || inst->opcode->iclass == ldst_unpriv)
900     info->addr.writeback = 0;
901   else
902     {
903       /* pre/post- index */
904       info->addr.writeback = 1;
905       if (extract_field (self->fields[1], code, 0) == 1)
906 	info->addr.preind = 1;
907       else
908 	info->addr.postind = 1;
909     }
910 
911   return 1;
912 }
913 
914 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}].  */
915 int
916 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
917 			 aarch64_insn code,
918 			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
919 {
920   int shift;
921   info->qualifier = get_expected_qualifier (inst, info->idx);
922   shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
923   /* Rn */
924   info->addr.base_regno = extract_field (self->fields[0], code, 0);
925   /* uimm12 */
926   info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
927   return 1;
928 }
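
/* Illustrative sketch (not compiled): for LDR <Xt>, [<Xn|SP>, #<pimm>]
   the byte offset is the unsigned 12-bit field scaled by the access
   size, e.g. uimm12 == 2 with an 8-byte access gives #16.  The
   example_* helper is hypothetical.  */
#if 0
static int64_t
example_uimm12_byte_offset (aarch64_insn uimm12)
{
  int shift = get_logsz (8);	/* 8-byte element -> shift by 3.  */
  return (int64_t) (uimm12 << shift);
}
#endif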
929 
930 /* Decode the address operand for e.g.
931      LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
932 int
933 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
934 			    aarch64_opnd_info *info,
935 			    aarch64_insn code, const aarch64_inst *inst)
936 {
937   /* The opcode dependent area stores the number of elements in
938      each structure to be loaded/stored.  */
939   int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
940 
941   /* Rn */
942   info->addr.base_regno = extract_field (FLD_Rn, code, 0);
943   /* Rm | #<amount>  */
944   info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
945   if (info->addr.offset.regno == 31)
946     {
947       if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
948 	/* Special handling of loading a single structure to all lanes.  */
949 	info->addr.offset.imm = (is_ld1r ? 1
950 				 : inst->operands[0].reglist.num_regs)
951 	  * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
952       else
953 	info->addr.offset.imm = inst->operands[0].reglist.num_regs
954 	  * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
955 	  * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
956     }
957   else
958     info->addr.offset.is_reg = 1;
959   info->addr.writeback = 1;
960 
961   return 1;
962 }
963 
964 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
965 int
966 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
967 		  aarch64_opnd_info *info,
968 		  aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
969 {
970   aarch64_insn value;
971   /* cond */
972   value = extract_field (FLD_cond, code, 0);
973   info->cond = get_cond_from_value (value);
974   return 1;
975 }
976 
977 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
978 int
979 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
980 		    aarch64_opnd_info *info,
981 		    aarch64_insn code,
982 		    const aarch64_inst *inst ATTRIBUTE_UNUSED)
983 {
984   /* op0:op1:CRn:CRm:op2 */
985   info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
986 				 FLD_CRm, FLD_op2);
987   return 1;
988 }
989 
990 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
991 int
992 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
993 			 aarch64_opnd_info *info, aarch64_insn code,
994 			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
995 {
996   int i;
997   /* op1:op2 */
998   info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
999   for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1000     if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1001       return 1;
1002   /* Reserved value in <pstatefield>.  */
1003   return 0;
1004 }
1005 
1006 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
1007 int
1008 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1009 		       aarch64_opnd_info *info,
1010 		       aarch64_insn code,
1011 		       const aarch64_inst *inst ATTRIBUTE_UNUSED)
1012 {
1013   int i;
1014   aarch64_insn value;
1015   const aarch64_sys_ins_reg *sysins_ops;
1016   /* op0:op1:CRn:CRm:op2 */
1017   value = extract_fields (code, 0, 5,
1018 			  FLD_op0, FLD_op1, FLD_CRn,
1019 			  FLD_CRm, FLD_op2);
1020 
1021   switch (info->type)
1022     {
1023     case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1024     case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1025     case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1026     case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1027     default: assert (0); return 0;
1028     }
1029 
1030   for (i = 0; sysins_ops[i].template != NULL; ++i)
1031     if (sysins_ops[i].value == value)
1032       {
1033 	info->sysins_op = sysins_ops + i;
1034 	DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1035 		     info->sysins_op->template,
1036 		     (unsigned)info->sysins_op->value,
1037 		     info->sysins_op->has_xt, i);
1038 	return 1;
1039       }
1040 
1041   return 0;
1042 }
1043 
1044 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>.  */
1045 
1046 int
1047 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1048 		     aarch64_opnd_info *info,
1049 		     aarch64_insn code,
1050 		     const aarch64_inst *inst ATTRIBUTE_UNUSED)
1051 {
1052   /* CRm */
1053   info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1054   return 1;
1055 }
1056 
1057 /* Decode the prefetch operation option operand for e.g.
1058      PRFM <prfop>, [<Xn|SP>{, #<pimm>}].  */
1059 
1060 int
1061 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1062 		   aarch64_opnd_info *info,
1063 		   aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1064 {
1065   /* prfop in Rt */
1066   info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1067   return 1;
1068 }
1069 
1070 /* Decode the extended register operand for e.g.
1071      STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
1072 int
1073 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1074 			  aarch64_opnd_info *info,
1075 			  aarch64_insn code,
1076 			  const aarch64_inst *inst ATTRIBUTE_UNUSED)
1077 {
1078   aarch64_insn value;
1079 
1080   /* Rm */
1081   info->reg.regno = extract_field (FLD_Rm, code, 0);
1082   /* option */
1083   value = extract_field (FLD_option, code, 0);
1084   info->shifter.kind =
1085     aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1086   /* imm3 */
1087   info->shifter.amount = extract_field (FLD_imm3, code,  0);
1088 
1089   /* This makes the constraint checking happy.  */
1090   info->shifter.operator_present = 1;
1091 
1092   /* Assume inst->operands[0].qualifier has been resolved.  */
1093   assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1094   info->qualifier = AARCH64_OPND_QLF_W;
1095   if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1096       && (info->shifter.kind == AARCH64_MOD_UXTX
1097 	  || info->shifter.kind == AARCH64_MOD_SXTX))
1098     info->qualifier = AARCH64_OPND_QLF_X;
1099 
1100   return 1;
1101 }
1102 
1103 /* Decode the shifted register operand for e.g.
1104      SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}.  */
1105 int
1106 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1107 			 aarch64_opnd_info *info,
1108 			 aarch64_insn code,
1109 			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1110 {
1111   aarch64_insn value;
1112 
1113   /* Rm */
1114   info->reg.regno = extract_field (FLD_Rm, code, 0);
1115   /* shift */
1116   value = extract_field (FLD_shift, code, 0);
1117   info->shifter.kind =
1118     aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1119   if (info->shifter.kind == AARCH64_MOD_ROR
1120       && inst->opcode->iclass != log_shift)
1121     /* ROR is not available for the shifted register operand in arithmetic
1122        instructions.  */
1123     return 0;
1124   /* imm6 */
1125   info->shifter.amount = extract_field (FLD_imm6, code,  0);
1126 
1127   /* This makes the constraint checking happy.  */
1128   info->shifter.operator_present = 1;
1129 
1130   return 1;
1131 }
1132 
1133 /* Bitfields that are commonly used to encode certain operands' information
1134    may be partially used as part of the base opcode in some instructions.
1135    For example, bit 1 of the field 'size' in
1136      FCVTXN <Vb><d>, <Va><n>
1137    is actually part of the base opcode, while only size<0> is available
1138    for encoding the register type.  Another example is the AdvSIMD
1139    instruction ORR (register), in which the field 'size' is also used for
1140    the base opcode, leaving only the field 'Q' available to encode the
1141    vector register arrangement specifier '8B' or '16B'.
1142 
1143    This function tries to deduce the qualifier from the value of partially
1144    constrained field(s).  Given the VALUE of such a field or fields, the
1145    qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1146    operand encoding), the function returns the matching qualifier or
1147    AARCH64_OPND_QLF_NIL if nothing matches.
1148 
1149    N.B. CANDIDATES is a group of possible qualifiers that are valid for
1150    one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1151    may end with AARCH64_OPND_QLF_NIL.  */
1152 
1153 static enum aarch64_opnd_qualifier
1154 get_qualifier_from_partial_encoding (aarch64_insn value,
1155 				     const enum aarch64_opnd_qualifier *
1156 				     candidates,
1157 				     aarch64_insn mask)
1158 {
1159   int i;
1160   DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1161   for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1162     {
1163       aarch64_insn standard_value;
1164       if (candidates[i] == AARCH64_OPND_QLF_NIL)
1165 	break;
1166       standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1167       if ((standard_value & mask) == (value & mask))
1168 	return candidates[i];
1169     }
1170   return AARCH64_OPND_QLF_NIL;
1171 }
1172 
1173 /* Given a list of qualifier sequences, return all possible valid qualifiers
1174    for operand IDX in QUALIFIERS.
1175    Assume QUALIFIERS is an array whose length is large enough.  */
1176 
1177 static void
1178 get_operand_possible_qualifiers (int idx,
1179 				 const aarch64_opnd_qualifier_seq_t *list,
1180 				 enum aarch64_opnd_qualifier *qualifiers)
1181 {
1182   int i;
1183   for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1184     if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1185       break;
1186 }
1187 
1188 /* Decode the size Q field for e.g. SHADD.
1189    We tag one operand with the qualifier according to the code;
1190    whether the qualifier is valid for this opcode or not is left to
1191    the semantic checking.  */
1192 
1193 static int
1194 decode_sizeq (aarch64_inst *inst)
1195 {
1196   int idx;
1197   enum aarch64_opnd_qualifier qualifier;
1198   aarch64_insn code;
1199   aarch64_insn value, mask;
1200   enum aarch64_field_kind fld_sz;
1201   enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1202 
1203   if (inst->opcode->iclass == asisdlse
1204      || inst->opcode->iclass == asisdlsep
1205      || inst->opcode->iclass == asisdlso
1206      || inst->opcode->iclass == asisdlsop)
1207     fld_sz = FLD_vldst_size;
1208   else
1209     fld_sz = FLD_size;
1210 
1211   code = inst->value;
1212   value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1213   /* Obtain the info about which bits of the fields Q and size are actually
1214      available for operand encoding.  Opcodes like FMAXNM and FMLA have
1215      size[1] unavailable.  */
1216   mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1217 
1218   /* The index of the operand we are going to tag with a qualifier and the
1219      qualifier itself are deduced from the value of the size and Q fields and
1220      the possible valid qualifier lists.  */
1221   idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1222   DEBUG_TRACE ("key idx: %d", idx);
1223 
1224   /* For most related instructions, size:Q is fully available for operand
1225      encoding.  */
1226   if (mask == 0x7)
1227     {
1228       inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1229       return 1;
1230     }
1231 
1232   get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1233 				   candidates);
1234 #ifdef DEBUG_AARCH64
1235   if (debug_dump)
1236     {
1237       int i;
1238       for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM
1239 	   && candidates[i] != AARCH64_OPND_QLF_NIL; ++i)
1240 	DEBUG_TRACE ("qualifier %d: %s", i,
1241 		     aarch64_get_qualifier_name(candidates[i]));
1242       DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1243     }
1244 #endif /* DEBUG_AARCH64 */
1245 
1246   qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1247 
1248   if (qualifier == AARCH64_OPND_QLF_NIL)
1249     return 0;
1250 
1251   inst->operands[idx].qualifier = qualifier;
1252   return 1;
1253 }
1254 
1255 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1256      e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
1257 
1258 static int
1259 decode_asimd_fcvt (aarch64_inst *inst)
1260 {
1261   aarch64_field field = {0, 0};
1262   aarch64_insn value;
1263   enum aarch64_opnd_qualifier qualifier;
1264 
1265   gen_sub_field (FLD_size, 0, 1, &field);
1266   value = extract_field_2 (&field, inst->value, 0);
1267   qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1268     : AARCH64_OPND_QLF_V_2D;
1269   switch (inst->opcode->op)
1270     {
1271     case OP_FCVTN:
1272     case OP_FCVTN2:
1273       /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
1274       inst->operands[1].qualifier = qualifier;
1275       break;
1276     case OP_FCVTL:
1277     case OP_FCVTL2:
1278       /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
1279       inst->operands[0].qualifier = qualifier;
1280       break;
1281     default:
1282       assert (0);
1283       return 0;
1284     }
1285 
1286   return 1;
1287 }
1288 
1289 /* Decode size[0], i.e. bit 22, for
1290      e.g. FCVTXN <Vb><d>, <Va><n>.  */
1291 
1292 static int
1293 decode_asisd_fcvtxn (aarch64_inst *inst)
1294 {
1295   aarch64_field field = {0, 0};
1296   gen_sub_field (FLD_size, 0, 1, &field);
1297   if (!extract_field_2 (&field, inst->value, 0))
1298     return 0;
1299   inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1300   return 1;
1301 }
1302 
1303 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
1304 static int
1305 decode_fcvt (aarch64_inst *inst)
1306 {
1307   enum aarch64_opnd_qualifier qualifier;
1308   aarch64_insn value;
1309   const aarch64_field field = {15, 2};
1310 
1311   /* opc dstsize */
1312   value = extract_field_2 (&field, inst->value, 0);
1313   switch (value)
1314     {
1315     case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1316     case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1317     case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1318     default: return 0;
1319     }
1320   inst->operands[0].qualifier = qualifier;
1321 
1322   return 1;
1323 }
1324 
1325 /* Do miscellaneous decodings that are not common enough to be driven by
1326    flags.  */
1327 
1328 static int
1329 do_misc_decoding (aarch64_inst *inst)
1330 {
1331   switch (inst->opcode->op)
1332     {
1333     case OP_FCVT:
1334       return decode_fcvt (inst);
1335     case OP_FCVTN:
1336     case OP_FCVTN2:
1337     case OP_FCVTL:
1338     case OP_FCVTL2:
1339       return decode_asimd_fcvt (inst);
1340     case OP_FCVTXN_S:
1341       return decode_asisd_fcvtxn (inst);
1342     default:
1343       return 0;
1344     }
1345 }
1346 
1347 /* Opcodes that have fields shared by multiple operands are usually flagged
1348    with flags.  In this function, we detect such flags, decode the related
1349    field(s) and store the information in one of the related operands.  The
1350    'one' operand is not an arbitrary operand, but one of the operands that
1351    can accommodate all the information that has been decoded.  */
1352 
1353 static int
1354 do_special_decoding (aarch64_inst *inst)
1355 {
1356   int idx;
1357   aarch64_insn value;
1358   /* Condition for truly conditionally executed instructions, e.g. b.cond.  */
1359   if (inst->opcode->flags & F_COND)
1360     {
1361       value = extract_field (FLD_cond2, inst->value, 0);
1362       inst->cond = get_cond_from_value (value);
1363     }
1364   /* 'sf' field.  */
1365   if (inst->opcode->flags & F_SF)
1366     {
1367       idx = select_operand_for_sf_field_coding (inst->opcode);
1368       value = extract_field (FLD_sf, inst->value, 0);
1369       inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1370       if ((inst->opcode->flags & F_N)
1371 	  && extract_field (FLD_N, inst->value, 0) != value)
1372 	return 0;
1373     }
1374   /* 'lse_sz' field.  */
1375   if (inst->opcode->flags & F_LSE_SZ)
1376     {
1377       idx = select_operand_for_sf_field_coding (inst->opcode);
1378       value = extract_field (FLD_lse_sz, inst->value, 0);
1379       inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1380     }
1381   /* size:Q fields.  */
1382   if (inst->opcode->flags & F_SIZEQ)
1383     return decode_sizeq (inst);
1384 
1385   if (inst->opcode->flags & F_FPTYPE)
1386     {
1387       idx = select_operand_for_fptype_field_coding (inst->opcode);
1388       value = extract_field (FLD_type, inst->value, 0);
1389       switch (value)
1390 	{
1391 	case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
1392 	case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
1393 	case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
1394 	default: return 0;
1395 	}
1396     }
1397 
1398   if (inst->opcode->flags & F_SSIZE)
1399     {
1400       /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have size[1] as part
1401 	 of the base opcode.  */
1402       aarch64_insn mask;
1403       enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1404       idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1405       value = extract_field (FLD_size, inst->value, inst->opcode->mask);
1406       mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
1407       /* For most related instructions, the 'size' field is fully available
1408 	 operand encoding.  */
1409       if (mask == 0x3)
1410 	inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
1411       else
1412 	{
1413 	  get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1414 					   candidates);
1415 	  inst->operands[idx].qualifier
1416 	    = get_qualifier_from_partial_encoding (value, candidates, mask);
1417 	}
1418     }
1419 
1420   if (inst->opcode->flags & F_T)
1421     {
1422       /* Num of consecutive '0's on the right side of imm5<3:0>.  */
1423       int num = 0;
1424       unsigned val, Q;
1425       assert (aarch64_get_operand_class (inst->opcode->operands[0])
1426 	      == AARCH64_OPND_CLASS_SIMD_REG);
1427       /* imm5<3:0>	q	<t>
1428 	 0000		x	reserved
1429 	 xxx1		0	8b
1430 	 xxx1		1	16b
1431 	 xx10		0	4h
1432 	 xx10		1	8h
1433 	 x100		0	2s
1434 	 x100		1	4s
1435 	 1000		0	reserved
1436 	 1000		1	2d  */
1437       val = extract_field (FLD_imm5, inst->value, 0);
1438       while ((val & 0x1) == 0 && ++num <= 3)
1439 	val >>= 1;
1440       if (num > 3)
1441 	return 0;
1442       Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
1443       inst->operands[0].qualifier =
1444 	get_vreg_qualifier_from_value ((num << 1) | Q);
1445     }
1446 
1447   if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1448     {
1449       /* Use Rt to encode in the case of e.g.
1450 	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
1451       idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1452       if (idx == -1)
1453 	{
1454 	  /* Otherwise use the result operand, which has to be an integer
1455 	     register.  */
1456 	  assert (aarch64_get_operand_class (inst->opcode->operands[0])
1457 		  == AARCH64_OPND_CLASS_INT_REG);
1458 	  idx = 0;
1459 	}
1460       assert (idx == 0 || idx == 1);
1461       value = extract_field (FLD_Q, inst->value, 0);
1462       inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1463     }
1464 
1465   if (inst->opcode->flags & F_LDS_SIZE)
1466     {
1467       aarch64_field field = {0, 0};
1468       assert (aarch64_get_operand_class (inst->opcode->operands[0])
1469 	      == AARCH64_OPND_CLASS_INT_REG);
1470       gen_sub_field (FLD_opc, 0, 1, &field);
1471       value = extract_field_2 (&field, inst->value, 0);
1472       inst->operands[0].qualifier
1473 	= value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
1474     }
1475 
1476   /* Miscellaneous decoding; done as the last step.  */
1477   if (inst->opcode->flags & F_MISC)
1478     return do_misc_decoding (inst);
1479 
1480   return 1;
1481 }
1482 
1483 /* Converters converting a real opcode instruction to its alias form.  */
1484 
1485 /* ROR <Wd>, <Ws>, #<shift>
1486      is equivalent to:
1487    EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
1488 static int
1489 convert_extr_to_ror (aarch64_inst *inst)
1490 {
1491   if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1492     {
1493       copy_operand_info (inst, 2, 3);
1494       inst->operands[3].type = AARCH64_OPND_NIL;
1495       return 1;
1496     }
1497   return 0;
1498 }
1499 
1500 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1501      is equivalent to:
1502    USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
1503 static int
1504 convert_shll_to_xtl (aarch64_inst *inst)
1505 {
1506   if (inst->operands[2].imm.value == 0)
1507     {
1508       inst->operands[2].type = AARCH64_OPND_NIL;
1509       return 1;
1510     }
1511   return 0;
1512 }
1513 
1514 /* Convert
1515      UBFM <Xd>, <Xn>, #<shift>, #63.
1516    to
1517      LSR <Xd>, <Xn>, #<shift>.  */
1518 static int
1519 convert_bfm_to_sr (aarch64_inst *inst)
1520 {
1521   int64_t imms, val;
1522 
1523   imms = inst->operands[3].imm.value;
1524   val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1525   if (imms == val)
1526     {
1527       inst->operands[3].type = AARCH64_OPND_NIL;
1528       return 1;
1529     }
1530 
1531   return 0;
1532 }
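
/* Illustrative example (arbitrary operands): the converter above accepts
   UBFM X0, X1, #4, #63 because imms is 63, drops operand 3 and yields
   LSR X0, X1, #4; the W-register form compares against 31 instead.  */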
1533 
1534 /* Convert MOV to ORR.  */
1535 static int
1536 convert_orr_to_mov (aarch64_inst *inst)
1537 {
1538   /* MOV <Vd>.<T>, <Vn>.<T>
1539      is equivalent to:
1540      ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
1541   if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1542     {
1543       inst->operands[2].type = AARCH64_OPND_NIL;
1544       return 1;
1545     }
1546   return 0;
1547 }
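
/* Illustrative example (arbitrary operands): the converter above turns
   ORR V0.16B, V1.16B, V1.16B, whose two source registers are identical,
   into MOV V0.16B, V1.16B.  */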
1548 
1549 /* When <imms> >= <immr>, the instruction written:
1550      SBFX <Xd>, <Xn>, #<lsb>, #<width>
1551    is equivalent to:
1552      SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
1553 
1554 static int
1555 convert_bfm_to_bfx (aarch64_inst *inst)
1556 {
1557   int64_t immr, imms;
1558 
1559   immr = inst->operands[2].imm.value;
1560   imms = inst->operands[3].imm.value;
1561   if (imms >= immr)
1562     {
1563       int64_t lsb = immr;
1564       inst->operands[2].imm.value = lsb;
1565       inst->operands[3].imm.value = imms + 1 - lsb;
1566       /* The two opcodes have different qualifiers for
1567 	 the immediate operands; reset to help the checking.  */
1568       reset_operand_qualifier (inst, 2);
1569       reset_operand_qualifier (inst, 3);
1570       return 1;
1571     }
1572 
1573   return 0;
1574 }
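
/* Illustrative example (arbitrary operands): for SBFM X0, X1, #4, #11 the
   converter above sees imms (11) >= immr (4) and rewrites the immediates
   to lsb = 4 and width = 11 + 1 - 4 = 8, i.e. SBFX X0, X1, #4, #8.  */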
1575 
1576 /* When <imms> < <immr>, the instruction written:
1577      SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1578    is equivalent to:
1579      SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
1580 
1581 static int
1582 convert_bfm_to_bfi (aarch64_inst *inst)
1583 {
1584   int64_t immr, imms, val;
1585 
1586   immr = inst->operands[2].imm.value;
1587   imms = inst->operands[3].imm.value;
1588   val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
1589   if (imms < immr)
1590     {
1591       inst->operands[2].imm.value = (val - immr) & (val - 1);
1592       inst->operands[3].imm.value = imms + 1;
1593       /* The two opcodes have different qualifiers for
1594 	 the immediate operands; reset to help the checking.  */
1595       reset_operand_qualifier (inst, 2);
1596       reset_operand_qualifier (inst, 3);
1597       return 1;
1598     }
1599 
1600   return 0;
1601 }
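
/* Illustrative example (arbitrary operands): for SBFM X0, X1, #60, #3 the
   converter above sees imms (3) < immr (60) and rewrites the immediates
   to lsb = (64 - 60) & 0x3f = 4 and width = 3 + 1 = 4, i.e.
   SBFIZ X0, X1, #4, #4.  */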
1602 
1603 /* The instruction written:
1604      LSL <Xd>, <Xn>, #<shift>
1605    is equivalent to:
1606      UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
1607 
1608 static int
1609 convert_ubfm_to_lsl (aarch64_inst *inst)
1610 {
1611   int64_t immr = inst->operands[2].imm.value;
1612   int64_t imms = inst->operands[3].imm.value;
1613   int64_t val
1614     = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1615 
1616   if ((immr == 0 && imms == val) || immr == imms + 1)
1617     {
1618       inst->operands[3].type = AARCH64_OPND_NIL;
1619       inst->operands[2].imm.value = val - imms;
1620       return 1;
1621     }
1622 
1623   return 0;
1624 }
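
/* Illustrative example (arbitrary operands): the converter above accepts
   UBFM X0, X1, #60, #59 because immr == imms + 1, and rewrites the shift
   to 63 - 59 = 4, i.e. LSL X0, X1, #4.  */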
1625 
1626 /* CINC <Wd>, <Wn>, <cond>
1627      is equivalent to:
1628    CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
1629      where <cond> is not AL or NV.  */
1630 
1631 static int
1632 convert_from_csel (aarch64_inst *inst)
1633 {
1634   if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
1635       && (inst->operands[3].cond->value & 0xe) != 0xe)
1636     {
1637       copy_operand_info (inst, 2, 3);
1638       inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
1639       inst->operands[3].type = AARCH64_OPND_NIL;
1640       return 1;
1641     }
1642   return 0;
1643 }
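
/* Illustrative example (arbitrary operands): the converter above turns
   CSINC W0, W1, W1, GE into CINC W0, W1, LT, i.e. the condition is
   inverted; with condition AL or NV the alias is not used.  */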
1644 
1645 /* CSET <Wd>, <cond>
1646      is equivalent to:
1647    CSINC <Wd>, WZR, WZR, invert(<cond>)
1648      where <cond> is not AL or NV.  */
1649 
1650 static int
1651 convert_csinc_to_cset (aarch64_inst *inst)
1652 {
1653   if (inst->operands[1].reg.regno == 0x1f
1654       && inst->operands[2].reg.regno == 0x1f
1655       && (inst->operands[3].cond->value & 0xe) != 0xe)
1656     {
1657       copy_operand_info (inst, 1, 3);
1658       inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
1659       inst->operands[3].type = AARCH64_OPND_NIL;
1660       inst->operands[2].type = AARCH64_OPND_NIL;
1661       return 1;
1662     }
1663   return 0;
1664 }
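
/* Illustrative example (arbitrary destination): the converter above turns
   CSINC W0, WZR, WZR, NE into CSET W0, EQ; with condition AL or NV the
   alias is not used.  */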
1665 
1666 /* MOV <Wd>, #<imm>
1667      is equivalent to:
1668    MOVZ <Wd>, #<imm16>, LSL #<shift>.
1669 
1670    A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1671    ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1672    or where a MOVN has an immediate that could be encoded by MOVZ, or where
1673    MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1674    machine-instruction mnemonic must be used.  */
1675 
1676 static int
1677 convert_movewide_to_mov (aarch64_inst *inst)
1678 {
1679   uint64_t value = inst->operands[1].imm.value;
1680   /* MOVZ/MOVN #0 have a shift amount other than LSL #0.  */
1681   if (value == 0 && inst->operands[1].shifter.amount != 0)
1682     return 0;
1683   inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1684   inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
1685   value <<= inst->operands[1].shifter.amount;
1686   /* Since this is an alias converter, note that INST->OPCODE is the opcode
1687      of the real instruction.  */
1688   if (inst->opcode->op == OP_MOVN)
1689     {
1690       int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1691       value = ~value;
1692       /* A MOVN has an immediate that could be encoded by MOVZ.  */
1693       if (aarch64_wide_constant_p (value, is32, NULL) == TRUE)
1694 	return 0;
1695     }
1696   inst->operands[1].imm.value = value;
1697   inst->operands[1].shifter.amount = 0;
1698   return 1;
1699 }
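
/* Illustrative examples (arbitrary operands): the converter above turns
   MOVZ W0, #0x1234, LSL #16 into MOV W0, #0x12340000, while
   MOVN W0, #0, LSL #16 is rejected by the shift check and keeps its
   machine mnemonic, as does a MOVN whose result could have been encoded
   by a MOVZ.  */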
1700 
1701 /* MOV <Wd>, #<imm>
1702      is equivalent to:
1703    ORR <Wd>, WZR, #<imm>.
1704 
1705    A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1706    ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1707    or where a MOVN has an immediate that could be encoded by MOVZ, or where
1708    MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1709    machine-instruction mnemonic must be used.  */
1710 
1711 static int
1712 convert_movebitmask_to_mov (aarch64_inst *inst)
1713 {
1714   int is32;
1715   uint64_t value;
1716 
1717   /* Should have been assured by the base opcode value.  */
1718   assert (inst->operands[1].reg.regno == 0x1f);
1719   copy_operand_info (inst, 1, 2);
1720   is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1721   inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1722   value = inst->operands[1].imm.value;
1723   /* ORR has an immediate that could be generated by a MOVZ or MOVN
1724      instruction.  */
1725   if (inst->operands[0].reg.regno != 0x1f
1726       && (aarch64_wide_constant_p (value, is32, NULL) == TRUE
1727 	  || aarch64_wide_constant_p (~value, is32, NULL) == TRUE))
1728     return 0;
1729 
1730   inst->operands[2].type = AARCH64_OPND_NIL;
1731   return 1;
1732 }
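
/* Illustrative examples (arbitrary destination): the converter above turns
   ORR W0, WZR, #0xff00ff into MOV W0, #0xff00ff, as that bitmask immediate
   cannot be produced by a single MOVZ or MOVN; ORR W0, WZR, #0xf0000 keeps
   the ORR mnemonic because MOVZ W0, #0xf, LSL #16 covers it.  */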
1733 
1734 /* Some alias opcodes are disassembled by being converted from their real form.
1735    N.B. INST->OPCODE is the real opcode rather than the alias.  */
1736 
1737 static int
1738 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
1739 {
1740   switch (alias->op)
1741     {
1742     case OP_ASR_IMM:
1743     case OP_LSR_IMM:
1744       return convert_bfm_to_sr (inst);
1745     case OP_LSL_IMM:
1746       return convert_ubfm_to_lsl (inst);
1747     case OP_CINC:
1748     case OP_CINV:
1749     case OP_CNEG:
1750       return convert_from_csel (inst);
1751     case OP_CSET:
1752     case OP_CSETM:
1753       return convert_csinc_to_cset (inst);
1754     case OP_UBFX:
1755     case OP_BFXIL:
1756     case OP_SBFX:
1757       return convert_bfm_to_bfx (inst);
1758     case OP_SBFIZ:
1759     case OP_BFI:
1760     case OP_UBFIZ:
1761       return convert_bfm_to_bfi (inst);
1762     case OP_MOV_V:
1763       return convert_orr_to_mov (inst);
1764     case OP_MOV_IMM_WIDE:
1765     case OP_MOV_IMM_WIDEN:
1766       return convert_movewide_to_mov (inst);
1767     case OP_MOV_IMM_LOG:
1768       return convert_movebitmask_to_mov (inst);
1769     case OP_ROR_IMM:
1770       return convert_extr_to_ror (inst);
1771     case OP_SXTL:
1772     case OP_SXTL2:
1773     case OP_UXTL:
1774     case OP_UXTL2:
1775       return convert_shll_to_xtl (inst);
1776     default:
1777       return 0;
1778     }
1779 }
1780 
1781 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
1782 				  aarch64_inst *, int);
1783 
1784 /* Given the instruction information in *INST, check if the instruction has
1785    any alias form that can be used to represent *INST.  If the answer is yes,
1786    update *INST to be in the form of the determined alias.  */
1787 
1788 /* In the opcode description table, the following flags are used in opcode
1789    entries to help establish the relations between the real and alias opcodes:
1790 
1791 	F_ALIAS:	opcode is an alias
1792 	F_HAS_ALIAS:	opcode has alias(es)
1793 	F_P1
1794 	F_P2
1795 	F_P3:		Disassembly preference priority 1-3 (the larger the
1796 			number, the higher the priority).  If nothing is specified,
1797 			the priority is 0 by default, i.e. the lowest priority.
1798 
1799    Although the relation between the machine and the alias instructions is not
1800    explicitly described, it can be easily determined from the base opcode
1801    values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
1802    description entries:
1803 
1804    The mask of an alias opcode must be equal to or a super-set (i.e. more
1805    constrained) of that of the aliased opcode; so is the base opcode value.
1806 
1807    if (opcode_has_alias (real) && alias_opcode_p (opcode)
1808        && (opcode->mask & real->mask) == real->mask
1809        && (real->mask & opcode->opcode) == (real->mask & real->opcode))
1810    then OPCODE is an alias of, and only of, the REAL instruction
1811 
1812    The alias relationship is deliberately kept flat to keep the related
1813    algorithms simple; an opcode entry cannot carry both F_ALIAS and F_HAS_ALIAS.
1814 
1815    During disassembly, the decoding decision tree (in
1816    opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
1817    if the decoding of such a machine instruction succeeds (and -Mno-aliases is
1818    not specified), the disassembler will check whether any alias instruction
1819    exists for this real instruction.  If there is, the disassembler will try
1820    to disassemble the 32-bit instruction word again using the alias's rule, or
1821    try to convert the IR to the form of the alias.  In the case of multiple
1822    aliases, the aliases are tried one by one from the highest priority
1823    (currently the flag F_P3) to the lowest priority (no priority flag), and the
1824    first one that succeeds is adopted.
1825 
1826    You may ask why there is a need to convert the IR from one form to
1827    another when handling certain aliases.  On the one hand, this avoids
1828    adding more operand code to handle unusual encoding/decoding; on the other
1829    hand, during disassembly, the conversion is an effective way to
1830    check the conditions of an alias (as an alias may be adopted only if
1831    certain conditions are met).
1832 
1833    In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
1834    aarch64_opcode_table and generated aarch64_find_alias_opcode and
1835    aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help.  */
1836 
1837 static void
1838 determine_disassembling_preference (struct aarch64_inst *inst)
1839 {
1840   const aarch64_opcode *opcode;
1841   const aarch64_opcode *alias;
1842 
1843   opcode = inst->opcode;
1844 
1845   /* This opcode does not have an alias, so use itself.  */
1846   if (opcode_has_alias (opcode) == FALSE)
1847     return;
1848 
1849   alias = aarch64_find_alias_opcode (opcode);
1850   assert (alias);
1851 
1852 #ifdef DEBUG_AARCH64
1853   if (debug_dump)
1854     {
1855       const aarch64_opcode *tmp = alias;
1856       printf ("####   LIST    ordered: ");
1857       while (tmp)
1858 	{
1859 	  printf ("%s, ", tmp->name);
1860 	  tmp = aarch64_find_next_alias_opcode (tmp);
1861 	}
1862       printf ("\n");
1863     }
1864 #endif /* DEBUG_AARCH64 */
1865 
1866   for (; alias; alias = aarch64_find_next_alias_opcode (alias))
1867     {
1868       DEBUG_TRACE ("try %s", alias->name);
1869       assert (alias_opcode_p (alias));
1870 
1871       /* An alias can be a pseudo opcode which will never be used in the
1872 	 disassembly, e.g. BIC logical immediate is such a pseudo opcode
1873 	 aliasing AND.  */
1874       if (pseudo_opcode_p (alias))
1875 	{
1876 	  DEBUG_TRACE ("skip pseudo %s", alias->name);
1877 	  continue;
1878 	}
1879 
1880       if ((inst->value & alias->mask) != alias->opcode)
1881 	{
1882 	  DEBUG_TRACE ("skip %s as base opcode does not match", alias->name);
1883 	  continue;
1884 	}
1885       /* No need to do any complicated transformation on operands, if the alias
1886 	 opcode does not have any operand.  */
1887       if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
1888 	{
1889 	  DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
1890 	  aarch64_replace_opcode (inst, alias);
1891 	  return;
1892 	}
1893       if (alias->flags & F_CONV)
1894 	{
1895 	  aarch64_inst copy;
1896 	  memcpy (&copy, inst, sizeof (aarch64_inst));
1897 	  /* ALIAS is the preference as long as the instruction can be
1898 	     successfully converted to the form of ALIAS.  */
1899 	  if (convert_to_alias (&copy, alias) == 1)
1900 	    {
1901 	      aarch64_replace_opcode (&copy, alias);
1902 	      assert (aarch64_match_operands_constraint (&copy, NULL));
1903 	      DEBUG_TRACE ("succeed with %s via conversion", alias->name);
1904 	      memcpy (inst, &copy, sizeof (aarch64_inst));
1905 	      return;
1906 	    }
1907 	}
1908       else
1909 	{
1910 	  /* Directly decode the alias opcode.  */
1911 	  aarch64_inst temp;
1912 	  memset (&temp, '\0', sizeof (aarch64_inst));
1913 	  if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
1914 	    {
1915 	      DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
1916 	      memcpy (inst, &temp, sizeof (aarch64_inst));
1917 	      return;
1918 	    }
1919 	}
1920     }
1921 }
1922 
1923 /* Decode the CODE according to OPCODE; fill INST.  Return 0 if the decoding
1924    fails, which means that CODE is not an instruction of OPCODE; otherwise
1925    return 1.
1926 
1927    If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
1928    determined and used to disassemble CODE; this is done just before the
1929    return.  */
1930 
1931 static int
1932 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
1933 		       aarch64_inst *inst, int noaliases_p)
1934 {
1935   int i;
1936 
1937   DEBUG_TRACE ("enter with %s", opcode->name);
1938 
1939   assert (opcode && inst);
1940 
1941   /* Check the base opcode.  */
1942   if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
1943     {
1944       DEBUG_TRACE ("base opcode match FAIL");
1945       goto decode_fail;
1946     }
1947 
1948   /* Clear inst.  */
1949   memset (inst, '\0', sizeof (aarch64_inst));
1950 
1951   inst->opcode = opcode;
1952   inst->value = code;
1953 
1954   /* Assign operand codes and indexes.  */
1955   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1956     {
1957       if (opcode->operands[i] == AARCH64_OPND_NIL)
1958 	break;
1959       inst->operands[i].type = opcode->operands[i];
1960       inst->operands[i].idx = i;
1961     }
1962 
1963   /* Call the opcode decoder indicated by flags.  */
1964   if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
1965     {
1966       DEBUG_TRACE ("opcode flag-based decoder FAIL");
1967       goto decode_fail;
1968     }
1969 
1970   /* Call operand decoders.  */
1971   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1972     {
1973       const aarch64_operand *opnd;
1974       enum aarch64_opnd type;
1975       type = opcode->operands[i];
1976       if (type == AARCH64_OPND_NIL)
1977 	break;
1978       opnd = &aarch64_operands[type];
1979       if (operand_has_extractor (opnd)
1980 	  && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
1981 	{
1982 	  DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
1983 	  goto decode_fail;
1984 	}
1985     }
1986 
1987   /* Match the qualifiers.  */
1988   if (aarch64_match_operands_constraint (inst, NULL) == 1)
1989     {
1990       /* Arriving here, the CODE has been determined as a valid instruction
1991 	 of OPCODE and *INST has been filled with information of this OPCODE
1992 	 instruction.  Before the return, check if the instruction has any
1993 	 alias and should be disassembled in the form of its alias instead.
1994 	 If the answer is yes, *INST will be updated.  */
1995       if (!noaliases_p)
1996 	determine_disassembling_preference (inst);
1997       DEBUG_TRACE ("SUCCESS");
1998       return 1;
1999     }
2000   else
2001     {
2002       DEBUG_TRACE ("constraint matching FAIL");
2003     }
2004 
2005 decode_fail:
2006   return 0;
2007 }
2008 
2009 /* This does some user-friendly fix-up to *INST.  It currently focuses on
2010    adjusting qualifiers to help the printed instruction be
2011    recognized/understood more easily.  */
2012 
2013 static void
2014 user_friendly_fixup (aarch64_inst *inst)
2015 {
2016   switch (inst->opcode->iclass)
2017     {
2018     case testbranch:
2019       /* TBNZ Xn|Wn, #uimm6, label
2020 	 Test and Branch Not Zero: conditionally jumps to label if bit number
2021 	 uimm6 in register Xn is not zero.  The bit number implies the width of
2022 	 the register, which may be written and should be disassembled as Wn if
2023 	 uimm6 is less than 32.  Limited to a branch offset range of +/- 32KiB.
2024 	 */
2025       if (inst->operands[1].imm.value < 32)
2026 	inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2027       break;
2028     default: break;
2029     }
2030 }
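
/* Illustrative example: a TBNZ that tests bit #3 of register 0 is given the
   W qualifier by the fixup above and is printed as TBNZ W0, #3, <label>,
   since a bit number below 32 implies the 32-bit view of the register.  */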
2031 
2032 /* Decode INSN and fill in *INST the instruction information.  */
2033 
2034 static int
2035 disas_aarch64_insn (uint64_t pc ATTRIBUTE_UNUSED, uint32_t insn,
2036 		    aarch64_inst *inst)
2037 {
2038   const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2039 
2040 #ifdef DEBUG_AARCH64
2041   if (debug_dump)
2042     {
2043       const aarch64_opcode *tmp = opcode;
2044       printf ("\n");
2045       DEBUG_TRACE ("opcode lookup:");
2046       while (tmp != NULL)
2047 	{
2048 	  aarch64_verbose ("  %s", tmp->name);
2049 	  tmp = aarch64_find_next_opcode (tmp);
2050 	}
2051     }
2052 #endif /* DEBUG_AARCH64 */
2053 
2054   /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2055      distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2056      opcode field and value, apart from the difference that one of them has an
2057      extra field as part of the opcode; such a field is used for operand
2058      encoding in other opcode(s) ('immh' in the case of the example).  */
2059   while (opcode != NULL)
2060     {
2061       /* But only one opcode can be decoded successfully, as the
2062 	 decoding routine will check the constraints carefully.  */
2063       if (aarch64_opcode_decode (opcode, insn, inst, no_aliases) == 1)
2064 	return ERR_OK;
2065       opcode = aarch64_find_next_opcode (opcode);
2066     }
2067 
2068   return ERR_UND;
2069 }
2070 
2071 /* Print operands.  */
2072 
2073 static void
2074 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2075 		const aarch64_opnd_info *opnds, struct disassemble_info *info)
2076 {
2077   int i, pcrel_p, num_printed;
2078   for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2079     {
2080       const size_t size = 128;
2081       char str[size];
2082       /* We mainly rely on the opcode's operand info; however, we also look
2083 	 into inst->operands to support the disassembling of optional
2084 	 operands.
2085 	 The two operand codes should be the same in all cases, apart from
2086 	 when the operand can be optional.  */
2087       if (opcode->operands[i] == AARCH64_OPND_NIL
2088 	  || opnds[i].type == AARCH64_OPND_NIL)
2089 	break;
2090 
2091       /* Generate the operand string in STR.  */
2092       aarch64_print_operand (str, size, pc, opcode, opnds, i, &pcrel_p,
2093 			     &info->target);
2094 
2095       /* Print the delimiter (taking account of omitted operand(s)).  */
2096       if (str[0] != '\0')
2097 	(*info->fprintf_func) (info->stream, "%s",
2098 			       num_printed++ == 0 ? "\t" : ", ");
2099 
2100       /* Print the operand.  */
2101       if (pcrel_p)
2102 	(*info->print_address_func) (info->target, info);
2103       else
2104 	(*info->fprintf_func) (info->stream, "%s", str);
2105     }
2106 }
2107 
2108 /* Print the instruction mnemonic name.  */
2109 
2110 static void
2111 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2112 {
2113   if (inst->opcode->flags & F_COND)
2114     {
2115       /* For instructions that are truly conditionally executed, e.g. b.cond,
2116 	 prepare the full mnemonic name with the corresponding condition
2117 	 suffix.  */
2118       char name[8], *ptr;
2119       size_t len;
2120 
2121       ptr = strchr (inst->opcode->name, '.');
2122       assert (ptr && inst->cond);
2123       len = ptr - inst->opcode->name;
2124       assert (len < 8);
2125       strncpy (name, inst->opcode->name, len);
2126       name [len] = '\0';
2127       (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2128     }
2129   else
2130     (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2131 }
2132 
2133 /* Print the instruction according to *INST.  */
2134 
2135 static void
2136 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2137 		    struct disassemble_info *info)
2138 {
2139   print_mnemonic_name (inst, info);
2140   print_operands (pc, inst->opcode, inst->operands, info);
2141 }
2142 
2143 /* Entry-point of the instruction disassembler and printer.  */
2144 
2145 static void
2146 print_insn_aarch64_word (bfd_vma pc,
2147 			 uint32_t word,
2148 			 struct disassemble_info *info)
2149 {
2150   static const char *err_msg[6] =
2151     {
2152       [ERR_OK]   = "_",
2153       [-ERR_UND] = "undefined",
2154       [-ERR_UNP] = "unpredictable",
2155       [-ERR_NYI] = "NYI"
2156     };
2157 
2158   int ret;
2159   aarch64_inst inst;
2160 
2161   info->insn_info_valid = 1;
2162   info->branch_delay_insns = 0;
2163   info->data_size = 0;
2164   info->target = 0;
2165   info->target2 = 0;
2166 
2167   if (info->flags & INSN_HAS_RELOC)
2168     /* If the instruction has a reloc associated with it, then
2169        the offset field in the instruction will actually be the
2170        addend for the reloc.  (If we are using REL type relocs).
2171        In such cases, we can ignore the pc when computing
2172        addresses, since the addend is not currently pc-relative.  */
2173     pc = 0;
2174 
2175   ret = disas_aarch64_insn (pc, word, &inst);
2176 
2177   if (((word >> 21) & 0x3ff) == 1)
2178     {
2179       /* RESERVED for ALES.  */
2180       assert (ret != ERR_OK);
2181       ret = ERR_NYI;
2182     }
2183 
2184   switch (ret)
2185     {
2186     case ERR_UND:
2187     case ERR_UNP:
2188     case ERR_NYI:
2189       /* Handle undefined instructions.  */
2190       info->insn_type = dis_noninsn;
2191       (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
2192 			     word, err_msg[-ret]);
2193       break;
2194     case ERR_OK:
2195       user_friendly_fixup (&inst);
2196       print_aarch64_insn (pc, &inst, info);
2197       break;
2198     default:
2199       abort ();
2200     }
2201 }
2202 
2203 /* Disallow mapping symbols ($x, $d etc) from
2204    being displayed in symbol relative addresses.  */
2205 
2206 bfd_boolean
2207 aarch64_symbol_is_valid (asymbol * sym,
2208 			 struct disassemble_info * info ATTRIBUTE_UNUSED)
2209 {
2210   const char * name;
2211 
2212   if (sym == NULL)
2213     return FALSE;
2214 
2215   name = bfd_asymbol_name (sym);
2216 
2217   return name
2218     && (name[0] != '$'
2219 	|| (name[1] != 'x' && name[1] != 'd')
2220 	|| (name[2] != '\0' && name[2] != '.'));
2221 }
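
/* Illustrative examples: the filter above rejects the mapping symbols "$x"
   and "$d" as well as dotted variants such as "$d.1", while a hypothetical
   ordinary symbol like "$my_data" (where the second character is neither
   'x' nor 'd') is still considered valid.  */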
2222 
2223 /* Print data bytes on INFO->STREAM.  */
2224 
2225 static void
2226 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
2227 		 uint32_t word,
2228 		 struct disassemble_info *info)
2229 {
2230   switch (info->bytes_per_chunk)
2231     {
2232     case 1:
2233       info->fprintf_func (info->stream, ".byte\t0x%02x", word);
2234       break;
2235     case 2:
2236       info->fprintf_func (info->stream, ".short\t0x%04x", word);
2237       break;
2238     case 4:
2239       info->fprintf_func (info->stream, ".word\t0x%08x", word);
2240       break;
2241     default:
2242       abort ();
2243     }
2244 }
2245 
2246 /* Try to infer the code or data type from a symbol.
2247    Returns nonzero if *MAP_TYPE was set.  */
2248 
2249 static int
2250 get_sym_code_type (struct disassemble_info *info, int n,
2251 		   enum map_type *map_type)
2252 {
2253   elf_symbol_type *es;
2254   unsigned int type;
2255   const char *name;
2256 
2257   es = *(elf_symbol_type **)(info->symtab + n);
2258   type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
2259 
2260   /* If the symbol has function type then use that.  */
2261   if (type == STT_FUNC)
2262     {
2263       *map_type = MAP_INSN;
2264       return TRUE;
2265     }
2266 
2267   /* Check for mapping symbols.  */
2268   name = bfd_asymbol_name(info->symtab[n]);
2269   if (name[0] == '$'
2270       && (name[1] == 'x' || name[1] == 'd')
2271       && (name[2] == '\0' || name[2] == '.'))
2272     {
2273       *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
2274       return TRUE;
2275     }
2276 
2277   return FALSE;
2278 }
2279 
2280 /* Entry-point of the AArch64 disassembler.  */
2281 
2282 int
2283 print_insn_aarch64 (bfd_vma pc,
2284 		    struct disassemble_info *info)
2285 {
2286   bfd_byte	buffer[INSNLEN];
2287   int		status;
2288   void		(*printer) (bfd_vma, uint32_t, struct disassemble_info *);
2289   bfd_boolean   found = FALSE;
2290   unsigned int	size = 4;
2291   unsigned long	data;
2292 
2293   if (info->disassembler_options)
2294     {
2295       set_default_aarch64_dis_options (info);
2296 
2297       parse_aarch64_dis_options (info->disassembler_options);
2298 
2299       /* To avoid repeated parsing of these options, we remove them here.  */
2300       info->disassembler_options = NULL;
2301     }
2302 
2303   /* AArch64 instructions are always little-endian.  */
2304   info->endian_code = BFD_ENDIAN_LITTLE;
2305 
2306   /* First check the full symtab for a mapping symbol, even if there
2307      are no usable non-mapping symbols for this address.  */
2308   if (info->symtab_size != 0
2309       && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
2310     {
2311       enum map_type type = MAP_INSN;
2312       int last_sym = -1;
2313       bfd_vma addr;
2314       int n;
2315 
2316       if (pc <= last_mapping_addr)
2317 	last_mapping_sym = -1;
2318 
2319       /* Start scanning at the start of the function, or wherever
2320 	 we finished last time.  */
2321       n = info->symtab_pos + 1;
2322       if (n < last_mapping_sym)
2323 	n = last_mapping_sym;
2324 
2325       /* Scan up to the location being disassembled.  */
2326       for (; n < info->symtab_size; n++)
2327 	{
2328 	  addr = bfd_asymbol_value (info->symtab[n]);
2329 	  if (addr > pc)
2330 	    break;
2331 	  if ((info->section == NULL
2332 	       || info->section == info->symtab[n]->section)
2333 	      && get_sym_code_type (info, n, &type))
2334 	    {
2335 	      last_sym = n;
2336 	      found = TRUE;
2337 	    }
2338 	}
2339 
2340       if (!found)
2341 	{
2342 	  n = info->symtab_pos;
2343 	  if (n < last_mapping_sym)
2344 	    n = last_mapping_sym;
2345 
2346 	  /* No mapping symbol found at this address.  Look backwards
2347 	     for a preceding one.  */
2348 	  for (; n >= 0; n--)
2349 	    {
2350 	      if (get_sym_code_type (info, n, &type))
2351 		{
2352 		  last_sym = n;
2353 		  found = TRUE;
2354 		  break;
2355 		}
2356 	    }
2357 	}
2358 
2359       last_mapping_sym = last_sym;
2360       last_type = type;
2361 
2362       /* Look a little bit ahead to see if we should print out
2363 	 less than four bytes of data.  If there's a symbol,
2364 	 mapping or otherwise, after two bytes then don't
2365 	 print more.  */
2366       if (last_type == MAP_DATA)
2367 	{
2368 	  size = 4 - (pc & 3);
2369 	  for (n = last_sym + 1; n < info->symtab_size; n++)
2370 	    {
2371 	      addr = bfd_asymbol_value (info->symtab[n]);
2372 	      if (addr > pc)
2373 		{
2374 		  if (addr - pc < size)
2375 		    size = addr - pc;
2376 		  break;
2377 		}
2378 	    }
2379 	  /* If the next symbol is after three bytes, we need to
2380 	     print only part of the data, so that we can use either
2381 	     .byte or .short.  */
2382 	  if (size == 3)
2383 	    size = (pc & 1) ? 1 : 2;
2384 	}
2385     }
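
  /* Illustrative walk-through of the sizing above (example addresses):
     disassembling data at pc == 0x1001 with the next symbol at 0x1004
     gives size = 4 - (pc & 3) = 3; as a three-byte chunk cannot be
     printed, the final adjustment picks 1 because pc is odd, so a single
     .byte directive is emitted.  */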
2386 
2387   if (last_type == MAP_DATA)
2388     {
2389       /* size was set above.  */
2390       info->bytes_per_chunk = size;
2391       info->display_endian = info->endian;
2392       printer = print_insn_data;
2393     }
2394   else
2395     {
2396       info->bytes_per_chunk = size = INSNLEN;
2397       info->display_endian = info->endian_code;
2398       printer = print_insn_aarch64_word;
2399     }
2400 
2401   status = (*info->read_memory_func) (pc, buffer, size, info);
2402   if (status != 0)
2403     {
2404       (*info->memory_error_func) (status, pc, info);
2405       return -1;
2406     }
2407 
2408   data = bfd_get_bits (buffer, size * 8,
2409 		       info->display_endian == BFD_ENDIAN_BIG);
2410 
2411   (*printer) (pc, data, info);
2412 
2413   return size;
2414 }
2415 
2416 void
2417 print_aarch64_disassembler_options (FILE *stream)
2418 {
2419   fprintf (stream, _("\n\
2420 The following AARCH64 specific disassembler options are supported for use\n\
2421 with the -M switch (multiple options should be separated by commas):\n"));
2422 
2423   fprintf (stream, _("\n\
2424   no-aliases         Don't print instruction aliases.\n"));
2425 
2426   fprintf (stream, _("\n\
2427   aliases            Do print instruction aliases.\n"));
2428 
2429 #ifdef DEBUG_AARCH64
2430   fprintf (stream, _("\n\
2431   debug_dump         Temp switch for debug trace.\n"));
2432 #endif /* DEBUG_AARCH64 */
2433 
2434   fprintf (stream, _("\n"));
2435 }
2436