* x86arch.h (x86_insn): Combine shift_op, signext_imm8_op, shortmov_op, and

address16_op flags into a single postop enum.
* x86id.re (yasm_x86__finalize_insn): Set new enum rather than flags.
* x86bc.c: Use new combined enum.

svn path=/trunk/yasm/; revision=1268
0.5.0rc2
Peter Johnson 20 years ago
parent ad6b7f186a
commit be801e73db
  1. 68
      modules/arch/x86/x86arch.h
  2. 37
      modules/arch/x86/x86bc.c
  3. 15
      modules/arch/x86/x86id.re

@ -180,38 +180,42 @@ typedef struct x86_insn {
unsigned char rex; /* REX AMD64 extension, 0 if none,
0xff if not allowed (high 8 bit reg used) */
/* HACK, but a space-saving one: shift opcodes have an immediate
* form and a ,1 form (with no immediate). In the parser, we
* set this and opcode_len=1, but store the ,1 version in the
* second byte of the opcode array. We then choose between the
* two versions once we know the actual value of imm (because we
* don't know it in the parser module).
*
* A override to force the imm version should just leave this at
* 0. Then later code won't know the ,1 version even exists.
* TODO: Figure out how this affects CPU flags processing.
*
* Call x86_SetInsnShiftFlag() to set this flag to 1.
*/
unsigned char shift_op;
/* HACK, similar to that for shift_op above, for optimizing instructions
* that take a sign-extended imm8 as well as imm values (eg, the arith
* instructions and a subset of the imul instructions).
*/
unsigned char signext_imm8_op;
/* HACK, similar to those above, for optimizing long (modrm+sib) mov
* instructions in amd64 into short mov instructions if a 32-bit address
* override is applied in 64-bit mode to an EA of just an offset (no
* registers) and the target register is al/ax/eax/rax.
*/
unsigned char shortmov_op;
/* Override any attempt at address-size override to 16 bits, and never
* generate a prefix. This is used for the ENTER opcode.
*/
unsigned char address16_op;
/* Postponed (from parsing to later binding) action options. */
enum {
/* None */
X86_POSTOP_NONE = 0,
/* Shift opcodes have an immediate form and a ,1 form (with no
* immediate). In the parser, we set this and opcode_len=1, but store
* the ,1 version in the second byte of the opcode array. We then
* choose between the two versions once we know the actual value of
* imm (because we don't know it in the parser module).
*
* A override to force the imm version should just leave this at
* 0. Then later code won't know the ,1 version even exists.
* TODO: Figure out how this affects CPU flags processing.
*/
X86_POSTOP_SHIFT,
/* Instructions that take a sign-extended imm8 as well as imm values
* (eg, the arith instructions and a subset of the imul instructions)
* should set this and put the imm8 form in the second byte of the
* opcode.
*/
X86_POSTOP_SIGNEXT_IMM8,
/* Long (modrm+sib) mov instructions in amd64 can be optimized into
* short mov instructions if a 32-bit address override is applied in
* 64-bit mode to an EA of just an offset (no registers) and the
* target register is al/ax/eax/rax.
*/
X86_POSTOP_SHORTMOV,
/* Override any attempt at address-size override to 16 bits, and never
* generate a prefix. This is used for the ENTER opcode.
*/
X86_POSTOP_ADDRESS16
} postop;
} x86_insn;
typedef struct x86_jmp {

@ -412,10 +412,10 @@ x86_bc_insn_print(const void *contents, FILE *f, int indent_level)
}
x86_opcode_print(&insn->opcode, f, indent_level);
x86_common_print(&insn->common, f, indent_level);
fprintf(f, "%*sSpPre=%02x REX=%03o ShiftOp=%u\n", indent_level, "",
fprintf(f, "%*sSpPre=%02x REX=%03o PostOp=%u\n", indent_level, "",
(unsigned int)insn->special_prefix,
(unsigned int)insn->rex,
(unsigned int)insn->shift_op);
(unsigned int)insn->postop);
}
static void
@ -514,8 +514,8 @@ x86_bc_insn_resolve(yasm_bytecode *bc, int save,
assert(temp != NULL);
/* Handle shortmov special-casing */
if (insn->shortmov_op && insn->common.mode_bits == 64 &&
insn->common.addrsize == 32 &&
if (insn->postop == X86_POSTOP_SHORTMOV &&
insn->common.mode_bits == 64 && insn->common.addrsize == 32 &&
!yasm_expr__contains(temp, YASM_EXPR_REG)) {
yasm_x86__ea_set_disponly((yasm_effaddr *)&eat);
@ -531,10 +531,11 @@ x86_bc_insn_resolve(yasm_bytecode *bc, int save,
* displacement.
*/
switch (yasm_x86__expr_checkea(&temp, &insn->common.addrsize,
insn->common.mode_bits, ea->nosplit, insn->address16_op,
&displen, &eat.modrm, &eat.valid_modrm, &eat.need_modrm,
&eat.sib, &eat.valid_sib, &eat.need_sib, &eat.pcrel,
&insn->rex, calc_bc_dist)) {
insn->common.mode_bits, ea->nosplit,
insn->postop == X86_POSTOP_ADDRESS16, &displen, &eat.modrm,
&eat.valid_modrm, &eat.need_modrm, &eat.sib,
&eat.valid_sib, &eat.need_sib, &eat.pcrel, &insn->rex,
calc_bc_dist)) {
case 1:
yasm_expr_destroy(temp);
/* failed, don't bother checking rest of insn */
@ -558,8 +559,8 @@ x86_bc_insn_resolve(yasm_bytecode *bc, int save,
displen = (insn->common.addrsize == 16) ? 2U : 4U;
}
/* Handle address16_op case */
if (insn->address16_op)
/* Handle address16 postop case */
if (insn->postop == X86_POSTOP_ADDRESS16)
insn->common.addrsize = 0;
/* If we had forced ea->len but had to override, save it now */
@ -591,8 +592,8 @@ x86_bc_insn_resolve(yasm_bytecode *bc, int save,
/* TODO: check imm->len vs. sized len from expr? */
/* Handle signext_imm8_op special-casing */
if (insn->signext_imm8_op && temp &&
/* Handle signext_imm8 postop special-casing */
if (insn->postop == X86_POSTOP_SIGNEXT_IMM8 && temp &&
(num = yasm_expr_get_intnum(&temp, calc_bc_dist))) {
if (num) {
int val = yasm_intnum_get_int(num);
@ -610,11 +611,11 @@ x86_bc_insn_resolve(yasm_bytecode *bc, int save,
}
/* Not really necessary, but saves confusion over it. */
if (save)
insn->signext_imm8_op = 0;
insn->postop = X86_POSTOP_NONE;
}
/* Handle shift_op special-casing */
if (insn->shift_op && temp &&
/* Handle shift postop special-casing */
if (insn->postop == X86_POSTOP_SHIFT && temp &&
(num = yasm_expr_get_intnum(&temp, calc_bc_dist))) {
if (num && yasm_intnum_get_uint(num) == 1) {
/* We can use the ,1 form: no immediate (set to 0 len) */
@ -633,7 +634,7 @@ x86_bc_insn_resolve(yasm_bytecode *bc, int save,
/* Not really necessary, but saves confusion over it. */
if (save)
insn->shift_op = 0;
insn->postop = X86_POSTOP_NONE;
}
yasm_expr_destroy(temp);
@ -906,8 +907,8 @@ x86_bc_insn_tobytes(yasm_bytecode *bc, unsigned char **bufp, void *d,
*/
if (yasm_x86__expr_checkea(&ea->disp, &addrsize,
insn->common.mode_bits, ea->nosplit,
insn->address16_op, &displen,
&eat.modrm, &eat.valid_modrm,
insn->postop == X86_POSTOP_ADDRESS16,
&displen, &eat.modrm, &eat.valid_modrm,
&eat.need_modrm, &eat.sib,
&eat.valid_sib, &eat.need_sib,
&eat.pcrel, &insn->rex,

@ -2595,10 +2595,7 @@ yasm_x86__finalize_insn(yasm_arch *arch, yasm_bytecode *bc,
spare = info->spare;
im_len = 0;
im_sign = 0;
insn->shift_op = 0;
insn->signext_imm8_op = 0;
insn->shortmov_op = 0;
insn->address16_op = 0;
insn->postop = X86_POSTOP_NONE;
insn->rex = 0;
/* Apply modifiers */
@ -2773,16 +2770,16 @@ yasm_x86__finalize_insn(yasm_arch *arch, yasm_bytecode *bc,
case OPAP_None:
break;
case OPAP_ShiftOp:
insn->shift_op = 1;
insn->postop = X86_POSTOP_SHIFT;
break;
case OPAP_SImm8Avail:
insn->signext_imm8_op = 1;
insn->postop = X86_POSTOP_SIGNEXT_IMM8;
break;
case OPAP_ShortMov:
insn->shortmov_op = 1;
insn->postop = X86_POSTOP_SHORTMOV;
break;
case OPAP_A16:
insn->address16_op = 1;
insn->postop = X86_POSTOP_ADDRESS16;
break;
default:
yasm_internal_error(
@ -2811,7 +2808,7 @@ yasm_x86__finalize_insn(yasm_arch *arch, yasm_bytecode *bc,
yasm_x86__bc_apply_prefixes((x86_common *)insn, num_prefixes, prefixes,
bc->line);
if (insn->address16_op && insn->common.addrsize) {
if (insn->postop == X86_POSTOP_ADDRESS16 && insn->common.addrsize) {
yasm__warning(YASM_WARN_GENERAL, bc->line,
N_("address size override ignored"));
insn->common.addrsize = 0;

Loading…
Cancel
Save