@@ -3941,7 +3941,7 @@ multiclass avx512_move_scalar<string asm, SDNode OpNode,
                    EVEX, Sched<[WriteFStore]>;
   let mayStore = 1, hasSideEffects = 0 in
   def mrk: AVX512PI<0x11, MRMDestMem, (outs),
-                   (ins _.ScalarMemOp:$dst, VK1WM:$mask, _.FRC:$src),
+                   (ins _.ScalarMemOp:$dst, VK1WM:$mask, _.RC:$src),
                    !strconcat(asm, "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
                    [], _.ExeDomain>, EVEX, EVEX_K, Sched<[WriteFStore]>,
                    NotMemoryFoldable;
@@ -3988,7 +3988,7 @@ def : Pat<(masked_store
                                (iPTR 0))), addr:$dst, Mask),
           (!cast<Instruction>(InstrStr#mrk) addr:$dst,
                       (COPY_TO_REGCLASS MaskRC:$mask, VK1WM),
-                      (COPY_TO_REGCLASS _.info128.RC:$src, _.info128.FRC))>;
+                      _.info128.RC:$src)>;
 
 }
 
@@ -4003,7 +4003,7 @@ def : Pat<(masked_store
                                (iPTR 0))), addr:$dst, Mask),
           (!cast<Instruction>(InstrStr#mrk) addr:$dst,
                       (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM),
-                      (COPY_TO_REGCLASS _.info128.RC:$src, _.info128.FRC))>;
+                      _.info128.RC:$src)>;
 
 }
 
@@ -4023,13 +4023,13 @@ def : Pat<(masked_store
                                (iPTR 0))), addr:$dst, Mask512),
           (!cast<Instruction>(InstrStr#mrk) addr:$dst,
                       (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM),
-                      (COPY_TO_REGCLASS _.info128.RC:$src, _.info128.FRC))>;
+                      _.info128.RC:$src)>;
 
 // AVX512VL pattern.
 def : Pat<(masked_store (_.info128.VT _.info128.RC:$src), addr:$dst, Mask128),
           (!cast<Instruction>(InstrStr#mrk) addr:$dst,
                       (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM),
-                      (COPY_TO_REGCLASS _.info128.RC:$src, _.info128.FRC))>;
+                      _.info128.RC:$src)>;
 }
 
 multiclass avx512_load_scalar_lowering<string InstrStr, AVX512VLVectorVTInfo _,