backend/x64: Handle shift amounts >= 8 in EmitVectorLogicalShiftLeft8

For an 8-bit lane, a left shift by 8 or more always yields zero. Handle
this case explicitly with `pxor`: on the AVX512 Icelake path the old code
computed `0x0102040810204080 >> (shift_amount * 8)`, which for
shift_amount >= 8 shifts a 64-bit constant by >= 64 bits (undefined
behaviour in C++); on the generic path it wasted a psllw+pand on a
known-zero result. Also flattens the nested conditionals into a single
if / else-if chain.

diff --git a/src/backend/x64/emit_x64_vector.cpp b/src/backend/x64/emit_x64_vector.cpp
index 7e7f7160..186170ec 100644
--- a/src/backend/x64/emit_x64_vector.cpp
+++ b/src/backend/x64/emit_x64_vector.cpp
@@ -1465,20 +1465,21 @@ void EmitX64::EmitVectorLogicalShiftLeft8(EmitContext& ctx, IR::Inst* inst) {
     const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
     const u8 shift_amount = args[1].GetImmediateU8();
 
-    if (shift_amount == 1) {
+    if (shift_amount == 0) {
+        // do nothing
+    } else if (shift_amount >= 8) {
+        code.pxor(result, result);
+    } else if (shift_amount == 1) {
         code.paddb(result, result);
-    } else if (shift_amount > 0) {
-        if (code.HasAVX512_Icelake()) {
-            // Galois 8x8 identity matrix, bit-shifted by the shift-amount
-            const u64 shift_matrix = 0x0102040810204080 >> (shift_amount * 8);
-            code.vgf2p8affineqb(result, result, code.MConst(xword_b, shift_matrix), 0);
-        } else {
-            const u64 replicand = (0xFFULL << shift_amount) & 0xFF;
-            const u64 mask = Common::Replicate(replicand, Common::BitSize<u8>());
+    } else if (code.HasAVX512_Icelake()) {
+        const u64 shift_matrix = 0x0102040810204080 >> (shift_amount * 8);
+        code.vgf2p8affineqb(result, result, code.MConst(xword_b, shift_matrix), 0);
+    } else {
+        const u64 replicand = (0xFFULL << shift_amount) & 0xFF;
+        const u64 mask = Common::Replicate(replicand, Common::BitSize<u8>());
 
-            code.psllw(result, shift_amount);
-            code.pand(result, code.MConst(xword, mask, mask));
-        }
+        code.psllw(result, shift_amount);
+        code.pand(result, code.MConst(xword, mask, mask));
     }
 
     ctx.reg_alloc.DefineValue(inst, result);