//===-- M68kInstrData.td - M68k Data Movement Instructions -*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes the Motorola 680x0 data movement instructions which are
/// the basic means of transferring and storing addresses and data. Here is the
/// current status of the file:
///
///  Machine:
///
///    EXG   [ ]   FMOVE  [ ]   FSMOVE [ ]   FDMOVE [ ]   FMOVEM [ ]
///    LEA   [~]   PEA    [ ]   MOVE   [~]   MOVE16 [ ]   MOVEA  [ ]
///    MOVEM [ ]   MOVEP  [ ]   MOVEQ  [ ]   LINK   [~]   UNLK   [~]
///
///  Pseudo:
///
///    MOVI  [x]   MOVSX  [x]   MOVZX  [x]   MOVX   [x]
///
///  Map:
///
///   [ ] - was not touched at all
///   [!] - requires external stuff implemented
///   [~] - in progress but usable
///   [x] - done
///
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// MOVE
//===----------------------------------------------------------------------===//

/// -----------------------------------------------------
///  F  E | D  C | B  A  9 | 8  7  6 | 5  4  3 | 2  1  0
/// -----------------------------------------------------
///       |      |    DESTINATION    |       SOURCE
///  0  0 | SIZE |   REG   |   MODE  |   MODE  |   REG
/// -----------------------------------------------------
///
/// NOTE Move requires EA X version for direct register destination(0)

// MOVE has a different size encoding.
class MxMoveSize<bits<2> value> {
  bits<2> Value = value;
}
def MxMoveSize8  : MxMoveSize<0b01>;
def MxMoveSize16 : MxMoveSize<0b11>;
def MxMoveSize32 : MxMoveSize<0b10>;

class MxMoveEncoding<MxMoveSize size, MxEncMemOp dst_enc, MxEncMemOp src_enc> {
  dag Value = (ascend
    (descend 0b00, size.Value,
      !cond(
        !eq(!getdagop(dst_enc.EA), descend): !setdagop(dst_enc.EA, ascend),
        !eq(!getdagop(dst_enc.EA), ascend): !setdagop(dst_enc.EA, descend)),
      src_enc.EA),
    // Source extension
    src_enc.Supplement,
    // Destination extension
    dst_enc.Supplement
  );
}
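// As a worked example of the layout above: `move.l %d1, %d0` has size bits
// 0b10, destination reg/mode 0b000/0b000 and source mode/reg 0b000/0b001,
// which assembles to the opword 0x2001. Note that the destination field
// stores REG above MODE (the reverse of the source field), which is what the
// dag-operator flip in MxMoveEncoding implements.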
// Special encoding for Xn
class MxMoveEncAddrMode_r<string reg_opnd> : MxEncMemOp {
  let EA = (descend (descend 0b00, (slice "$"#reg_opnd, 3, 3)),
                    (operand "$"#reg_opnd, 3));
}

// TODO: Generalize and adopt this utility in other .td files as well.
multiclass MxMoveOperandEncodings<string opnd_name> {
  // Dn
  def MxMove#NAME#OpEnc_d : MxEncAddrMode_d<opnd_name>;
  // An
  def MxMove#NAME#OpEnc_a : MxEncAddrMode_a<opnd_name>;
  // Xn
  def MxMove#NAME#OpEnc_r : MxMoveEncAddrMode_r<opnd_name>;
  // (An)+
  def MxMove#NAME#OpEnc_o : MxEncAddrMode_o<opnd_name>;
  // -(An)
  def MxMove#NAME#OpEnc_e : MxEncAddrMode_e<opnd_name>;
  // (i,PC,Xn)
  def MxMove#NAME#OpEnc_k : MxEncAddrMode_k<opnd_name>;
  // (i,PC)
  def MxMove#NAME#OpEnc_q : MxEncAddrMode_q<opnd_name>;
  // (i,An,Xn)
  def MxMove#NAME#OpEnc_f : MxEncAddrMode_f<opnd_name>;
  // (i,An)
  def MxMove#NAME#OpEnc_p : MxEncAddrMode_p<opnd_name>;
  // (ABS).L
  def MxMove#NAME#OpEnc_b : MxEncAddrMode_abs<opnd_name, true>;
  // (An)
  def MxMove#NAME#OpEnc_j : MxEncAddrMode_j<opnd_name>;
}

defm Src : MxMoveOperandEncodings<"src">;
defm Dst : MxMoveOperandEncodings<"dst">;

defvar MxMoveSupportedAMs = ["o", "e", "k", "q", "f", "p", "b", "j"];
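// The single-letter codes follow the addressing-mode naming used by the
// MxOp*AddrMode_* bundles: o = (An)+, e = -(An), k = (i,PC,Xn), q = (i,PC),
// f = (i,An,Xn), p = (i,An), b = (ABS).L and j = (An). Register and immediate
// operands use d (Dn), a (An), r (any Rn) and i respectively.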
!cast("MxMoveSrcOpEnc_"#AM)> { let isCodeGenOnly = true; } } // foreach AM } // let Pattern let mayLoad = 1, mayStore = 1 in class MxMove_MM : MxMove("MxMoveSize"#TYPE.Size), DST_ENC, SRC_ENC>>; foreach DST_AM = MxMoveSupportedAMs in foreach SRC_AM = MxMoveSupportedAMs in { foreach TYPE = [MxType8, MxType16, MxType32] in def MOV # TYPE.Size # DST_AM # SRC_AM # TYPE.Postfix : MxMove_MM("MxOp"#TYPE.Size#"AddrMode_"#DST_AM), !cast("MxOp"#TYPE.Size#"AddrMode_"#SRC_AM), !cast("MxMoveDstOpEnc_"#DST_AM), !cast("MxMoveSrcOpEnc_"#SRC_AM)>; } // foreach SRC_AM // Store ABS(basically pointer) as Immdiate to Mem def : Pat<(store MxType32.BPat :$src, MxType32.PPat :$dst), (MOV32pi MxType32.POp :$dst, MxType32.IOp :$src)>; def : Pat<(store MxType32.BPat :$src, MxType32.FPat :$dst), (MOV32fi MxType32.FOp :$dst, MxType32.IOp :$src)>; def : Pat<(store MxType32.BPat :$src, MxType32.BPat :$dst), (MOV32bi MxType32.BOp :$dst, MxType32.IOp :$src)>; def : Pat<(store MxType32.BPat :$src, MxType32.JPat :$dst), (MOV32ji MxType32.JOp :$dst, MxType32.IOp :$src)>; //===----------------------------------------------------------------------===// // MOVEQ //===----------------------------------------------------------------------===// /// ------------+---------+---+----------------------- /// F E D C | B A 9 | 8 | 7 6 5 4 3 2 1 0 /// ------------+---------+---+----------------------- /// 0 1 1 1 | REG | 0 | DATA /// ------------+---------+---+----------------------- // No pattern, as all immediate -> register moves are matched to the MOVI pseudo let Defs = [CCR] in def MOVQ : MxInst<(outs MxDRD32:$dst), (ins Mxi8imm:$imm), "moveq\t$imm, $dst", [(null_frag)]> { let Inst = (descend 0b0111, (operand "$dst", 3), 0b0, (operand "$imm", 8)); } //===----------------------------------------------------------------------===// // MOVEM // // The mask is already pre-processed by the save/restore spill hook //===----------------------------------------------------------------------===// // Direction defvar MxMOVEM_MR = false; defvar MxMOVEM_RM = true; // Size defvar MxMOVEM_W = false; defvar MxMOVEM_L = true; /// ---------------+-------------+-------------+--------- /// F E D C B | A | 9 8 7 | 6 | 5 4 3 | 2 1 0 /// ---------------+---+---------+---+---------+--------- /// 0 1 0 0 1 | D | 0 0 1 | S | MODE | REG /// ---------------+---+---------+---+---------+--------- /// REGISTER LIST MASK /// ----------------------------------------------------- /// D - direction(RM,MR) /// S - size(W,L) class MxMOVEMEncoding { dag Value = (ascend (descend 0b01001, direction, 0b001, size, opnd_enc.EA), // Mask (operand "$"#mask_op_name, 16), opnd_enc.Supplement ); } let mayStore = 1 in class MxMOVEM_MR : MxInst<(outs), (ins MEMOp:$dst, MxMoveMask:$mask), "movem."#TYPE.Prefix#"\t$mask, $dst", []> { let Inst = MxMOVEMEncoding.Value; } foreach AM = MxMoveSupportedAMs in { foreach TYPE = [MxType16, MxType32] in def MOVM # TYPE.Size # AM # m # TYPE.Postfix : MxMOVEM_MR("MxOp"#TYPE.Size#"AddrMode_"#AM).Op, !cast("MxMoveDstOpEnc_"#AM)>; } // foreach AM let mayLoad = 1 in class MxMOVEM_RM : MxInst<(outs), (ins MxMoveMask:$mask, MEMOp:$src), "movem."#TYPE.Prefix#"\t$src, $mask", []> { let Inst = MxMOVEMEncoding.Value; } foreach AM = MxMoveSupportedAMs in { foreach TYPE = [MxType16, MxType32] in def MOVM # TYPE.Size # m # AM # TYPE.Postfix : MxMOVEM_RM("MxOp"#TYPE.Size#"AddrMode_"#AM).Op, !cast("MxMoveSrcOpEnc_"#AM)>; } // foreach AM // Pseudo versions. These a required by virtual register spill/restore since // the mask requires real register to encode. 
// R <- R
class MxMove_RR<MxType TYPE, string DST_REG, string SRC_REG, MxMoveEncoding ENC,
                MxOpBundle DST = !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#DST_REG),
                MxOpBundle SRC = !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#SRC_REG)>
    : MxMove<TYPE.Prefix, (outs DST.Op:$dst), (ins SRC.Op:$src),
             [(null_frag)], ENC>;

foreach DST_REG = ["r", "a"] in {
  foreach SRC_REG = ["r", "a"] in
  foreach TYPE = [MxType16, MxType32] in
  def MOV # TYPE.Size # DST_REG # SRC_REG # TYPE.Postfix
    : MxMove_RR<TYPE, DST_REG, SRC_REG,
                MxMoveEncoding<!cast<MxMoveSize>("MxMoveSize"#TYPE.Size),
                               !cast<MxEncMemOp>("MxMoveDstOpEnc_"#DST_REG),
                               !cast<MxEncMemOp>("MxMoveSrcOpEnc_"#SRC_REG)>>;
} // foreach DST_REG

foreach TYPE = [MxType8, MxType16, MxType32] in
def MOV # TYPE.Size # dd # TYPE.Postfix
  : MxMove_RR<TYPE, "d", "d",
              MxMoveEncoding<!cast<MxMoveSize>("MxMoveSize"#TYPE.Size),
                             MxMoveDstOpEnc_d, MxMoveSrcOpEnc_d>>;

// M <- R
let mayStore = 1 in {
class MxMove_MR<MxType TYPE, MxOpBundle DST, string SRC_REG, MxMoveEncoding ENC,
                MxOpBundle SRC = !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#SRC_REG)>
    : MxMove<TYPE.Prefix, (outs), (ins DST.Op:$dst, SRC.Op:$src),
             [(store TYPE.VT:$src, DST.Pat:$dst)], ENC>;

class MxMove_MI<MxType TYPE, MxOpBundle DST, MxMoveEncoding ENC,
                MxImmOpBundle SRC = !cast<MxImmOpBundle>("MxOp"#TYPE.Size#"AddrMode_i")>
    : MxMove<TYPE.Prefix, (outs), (ins DST.Op:$dst, SRC.Op:$src),
             [(store SRC.ImmPat:$src, DST.Pat:$dst)], ENC>;
} // let mayStore = 1

foreach REG = ["r", "a", "d"] in
foreach AM = MxMoveSupportedAMs in {
  foreach TYPE = !if(!eq(REG, "d"), [MxType8, MxType16, MxType32],
                     [MxType16, MxType32]) in
  def MOV # TYPE.Size # AM # REG # TYPE.Postfix
    : MxMove_MR<TYPE, !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#AM), REG,
                MxMoveEncoding<!cast<MxMoveSize>("MxMoveSize"#TYPE.Size),
                               !cast<MxEncMemOp>("MxMoveDstOpEnc_"#AM),
                               !cast<MxEncMemOp>("MxMoveSrcOpEnc_"#REG)>>;
} // foreach AM

foreach AM = MxMoveSupportedAMs in {
  foreach TYPE = [MxType8, MxType16, MxType32] in
  def MOV # TYPE.Size # AM # i # TYPE.Postfix
    : MxMove_MI<TYPE, !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#AM),
                MxMoveEncoding<!cast<MxMoveSize>("MxMoveSize"#TYPE.Size),
                               !cast<MxEncMemOp>("MxMoveDstOpEnc_"#AM),
                               MxEncAddrMode_i<"src", TYPE.Size>>>;
} // foreach AM

// R <- I
// No pattern, as all immediate -> register moves are matched to the MOVI pseudo
class MxMove_RI<MxType TYPE, string DST_REG, MxMoveEncoding ENC,
                MxImmOpBundle SRC = !cast<MxImmOpBundle>("MxOp"#TYPE.Size#"AddrMode_i"),
                MxOpBundle DST = !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#DST_REG)>
    : MxMove<TYPE.Prefix, (outs DST.Op:$dst), (ins SRC.Op:$src),
             [(null_frag)], ENC>;

foreach REG = ["r", "a", "d"] in {
  foreach TYPE = !if(!eq(REG, "d"), [MxType8, MxType16, MxType32],
                     [MxType16, MxType32]) in
  def MOV # TYPE.Size # REG # i # TYPE.Postfix
    : MxMove_RI<TYPE, REG,
                MxMoveEncoding<!cast<MxMoveSize>("MxMoveSize"#TYPE.Size),
                               !cast<MxEncMemOp>("MxMoveDstOpEnc_"#REG),
                               MxEncAddrMode_i<"src", TYPE.Size>>>;
} // foreach REG

// R <- M
let mayLoad = 1 in
class MxMove_RM<MxType TYPE, string DST_REG, MxOpBundle SRC, MxEncMemOp SRC_ENC,
                MxMoveSize SIZE_ENC = !cast<MxMoveSize>("MxMoveSize"#TYPE.Size),
                MxOpBundle DST = !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#DST_REG),
                MxEncMemOp DST_ENC = !cast<MxEncMemOp>("MxMoveDstOpEnc_"#DST_REG)>
    : MxMove<TYPE.Prefix, (outs DST.Op:$dst), (ins SRC.Op:$src),
             [(set TYPE.VT:$dst, (TYPE.Load SRC.Pat:$src))],
             MxMoveEncoding<SIZE_ENC, DST_ENC, SRC_ENC>>;

foreach REG = ["r", "a", "d"] in
foreach AM = MxMoveSupportedAMs in {
  foreach TYPE = !if(!eq(REG, "d"), [MxType8, MxType16, MxType32],
                     [MxType16, MxType32]) in
  def MOV # TYPE.Size # REG # AM # TYPE.Postfix
    : MxMove_RM<TYPE, REG, !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#AM),
                !cast<MxEncMemOp>("MxMoveSrcOpEnc_"#AM)>;
} // foreach AM

// Tail call version
let Pattern = [(null_frag)] in {
  foreach REG = ["r", "a"] in
  foreach AM = MxMoveSupportedAMs in {
    foreach TYPE = [MxType16, MxType32] in
    def MOV # TYPE.Size # REG # AM # _TC
      : MxMove_RM<TYPE, REG, !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#AM),
                  !cast<MxEncMemOp>("MxMoveSrcOpEnc_"#AM)> {
      let isCodeGenOnly = true;
    }
  } // foreach AM
} // let Pattern

let mayLoad = 1, mayStore = 1 in
class MxMove_MM<MxType TYPE, MxOpBundle DST, MxOpBundle SRC,
                MxEncMemOp DST_ENC, MxEncMemOp SRC_ENC>
    : MxMove<TYPE.Prefix, (outs), (ins DST.Op:$dst, SRC.Op:$src),
             [(store (TYPE.Load SRC.Pat:$src), DST.Pat:$dst)],
             MxMoveEncoding<!cast<MxMoveSize>("MxMoveSize"#TYPE.Size),
                            DST_ENC, SRC_ENC>>;

foreach DST_AM = MxMoveSupportedAMs in
foreach SRC_AM = MxMoveSupportedAMs in {
  foreach TYPE = [MxType8, MxType16, MxType32] in
  def MOV # TYPE.Size # DST_AM # SRC_AM # TYPE.Postfix
    : MxMove_MM<TYPE, !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#DST_AM),
                !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#SRC_AM),
                !cast<MxEncMemOp>("MxMoveDstOpEnc_"#DST_AM),
                !cast<MxEncMemOp>("MxMoveSrcOpEnc_"#SRC_AM)>;
} // foreach SRC_AM

// Store ABS (basically a pointer) as an immediate to memory
def : Pat<(store MxType32.BPat:$src, MxType32.PPat:$dst),
          (MOV32pi MxType32.POp:$dst, MxType32.IOp:$src)>;
def : Pat<(store MxType32.BPat:$src, MxType32.FPat:$dst),
          (MOV32fi MxType32.FOp:$dst, MxType32.IOp:$src)>;
def : Pat<(store MxType32.BPat:$src, MxType32.BPat:$dst),
          (MOV32bi MxType32.BOp:$dst, MxType32.IOp:$src)>;
def : Pat<(store MxType32.BPat:$src, MxType32.JPat:$dst),
          (MOV32ji MxType32.JOp:$dst, MxType32.IOp:$src)>;

//===----------------------------------------------------------------------===//
// MOVEQ
//===----------------------------------------------------------------------===//

/// ------------+---------+---+-----------------------
///  F  E  D  C | B  A  9 | 8 | 7  6  5  4  3  2  1  0
/// ------------+---------+---+-----------------------
///  0  1  1  1 |   REG   | 0 |          DATA
/// ------------+---------+---+-----------------------

// No pattern, as all immediate -> register moves are matched to the MOVI pseudo
let Defs = [CCR] in
def MOVQ : MxInst<(outs MxDRD32:$dst), (ins Mxi8imm:$imm),
                  "moveq\t$imm, $dst", [(null_frag)]> {
  let Inst = (descend 0b0111, (operand "$dst", 3), 0b0, (operand "$imm", 8));
}
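// For illustration: `moveq #42, %d1` places the register in bits B-9 and the
// 8-bit immediate (sign-extended to 32 bits at run time) in the low byte,
// giving the opword 0x722A (0111 0010 0010 1010).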
//===----------------------------------------------------------------------===//
// MOVEM
//
// The mask is already pre-processed by the save/restore spill hook
//===----------------------------------------------------------------------===//

// Direction
defvar MxMOVEM_MR = false;
defvar MxMOVEM_RM = true;

// Size
defvar MxMOVEM_W = false;
defvar MxMOVEM_L = true;

/// ---------------+---+---------+---+---------+---------
///  F  E  D  C  B | A | 9  8  7 | 6 | 5  4  3 | 2  1  0
/// ---------------+---+---------+---+---------+---------
///  0  1  0  0  1 | D | 0  0  1 | S |   MODE  |   REG
/// ---------------+---+---------+---+---------+---------
///                 REGISTER LIST MASK
/// ------------------------------------------------------
/// D - direction(RM,MR)
/// S - size(W,L)
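// For the control addressing modes the mask word that follows the opword uses
// bit 0 for %d0 up to bit 15 for %a7 (predecrement mode reverses this order),
// so e.g. `movem.l %d0-%d1/%a0, (%a1)` encodes as 0x48D1 followed by the mask
// word 0x0103.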
class MxMOVEMEncoding<MxEncMemOp opnd_enc, bit size, bit direction,
                      string mask_op_name> {
  dag Value = (ascend
    (descend 0b01001, direction, 0b001, size, opnd_enc.EA),
    // Mask
    (operand "$"#mask_op_name, 16),
    opnd_enc.Supplement
  );
}

let mayStore = 1 in
class MxMOVEM_MR<MxType TYPE, bit SIZE_ENC,
                 MxOperand MEMOp, MxEncMemOp MEM_ENC>
    : MxInst<(outs), (ins MEMOp:$dst, MxMoveMask:$mask),
             "movem."#TYPE.Prefix#"\t$mask, $dst", []> {
  let Inst = MxMOVEMEncoding<MEM_ENC, SIZE_ENC, MxMOVEM_MR, "mask">.Value;
}

foreach AM = MxMoveSupportedAMs in {
  foreach TYPE = [MxType16, MxType32] in
  def MOVM # TYPE.Size # AM # m # TYPE.Postfix
    : MxMOVEM_MR<TYPE, !if(!eq(TYPE, MxType16), MxMOVEM_W, MxMOVEM_L),
                 !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#AM).Op,
                 !cast<MxEncMemOp>("MxMoveDstOpEnc_"#AM)>;
} // foreach AM

let mayLoad = 1 in
class MxMOVEM_RM<MxType TYPE, bit SIZE_ENC,
                 MxOperand MEMOp, MxEncMemOp MEM_ENC>
    : MxInst<(outs), (ins MxMoveMask:$mask, MEMOp:$src),
             "movem."#TYPE.Prefix#"\t$src, $mask", []> {
  let Inst = MxMOVEMEncoding<MEM_ENC, SIZE_ENC, MxMOVEM_RM, "mask">.Value;
}

foreach AM = MxMoveSupportedAMs in {
  foreach TYPE = [MxType16, MxType32] in
  def MOVM # TYPE.Size # m # AM # TYPE.Postfix
    : MxMOVEM_RM<TYPE, !if(!eq(TYPE, MxType16), MxMOVEM_W, MxMOVEM_L),
                 !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#AM).Op,
                 !cast<MxEncMemOp>("MxMoveSrcOpEnc_"#AM)>;
} // foreach AM

// Pseudo versions. These are required by virtual register spill/restore since
// the mask requires real registers to encode. These instructions will be
// expanded into real MOVEM after RA finishes.
let mayStore = 1 in
class MxMOVEM_MR_Pseudo<MxType TYPE, MxOperand MEMOp>
    : MxPseudo<(outs), (ins MEMOp:$dst, TYPE.ROp:$reg)>;
let mayLoad = 1 in
class MxMOVEM_RM_Pseudo<MxType TYPE, MxOperand MEMOp>
    : MxPseudo<(outs TYPE.ROp:$dst), (ins MEMOp:$src)>;

// Mem <- Reg
def MOVM8jm_P  : MxMOVEM_MR_Pseudo<MxType8d,  MxType8.JOp>;
def MOVM16jm_P : MxMOVEM_MR_Pseudo<MxType16r, MxType16.JOp>;
def MOVM32jm_P : MxMOVEM_MR_Pseudo<MxType32r, MxType32.JOp>;

def MOVM8pm_P  : MxMOVEM_MR_Pseudo<MxType8d,  MxType8.POp>;
def MOVM16pm_P : MxMOVEM_MR_Pseudo<MxType16r, MxType16.POp>;
def MOVM32pm_P : MxMOVEM_MR_Pseudo<MxType32r, MxType32.POp>;

// Reg <- Mem
def MOVM8mj_P  : MxMOVEM_RM_Pseudo<MxType8d,  MxType8.JOp>;
def MOVM16mj_P : MxMOVEM_RM_Pseudo<MxType16r, MxType16.JOp>;
def MOVM32mj_P : MxMOVEM_RM_Pseudo<MxType32r, MxType32.JOp>;

def MOVM8mp_P  : MxMOVEM_RM_Pseudo<MxType8d,  MxType8.POp>;
def MOVM16mp_P : MxMOVEM_RM_Pseudo<MxType16r, MxType16.POp>;
def MOVM32mp_P : MxMOVEM_RM_Pseudo<MxType32r, MxType32.POp>;

//===----------------------------------------------------------------------===//
// MOVE to/from SR/CCR
//
// Special care must be taken when working with moves to/from CCR: it is
// basically the word-size SR register truncated for user mode, so it only
// supports word-size instructions. In addition, the original M68000 does not
// support moves from CCR. So, to use CCR effectively, one MUST use the proper
// byte-size pseudo instructions, which are resolved sometime after the RA pass.
//===----------------------------------------------------------------------===//

/// --------------------------------------------------
///  F  E  D  C  B  A  9  8  7  6 | 5  4  3 | 2  1  0
/// --------------------------------------------------
///                                | EFFECTIVE ADDRESS
///  0  1  0  0  0  1  0  0  1  1 |   MODE  |   REG
/// --------------------------------------------------
let Defs = [CCR] in
class MxMoveToCCR<MxOperand MEMOp, MxEncMemOp SRC_ENC>
    : MxInst<(outs CCRC:$dst), (ins MEMOp:$src), "move.w\t$src, $dst", []> {
  let Inst = (ascend
    (descend 0b0100010011, SRC_ENC.EA),
    SRC_ENC.Supplement
  );
}

class MxMoveToCCRPseudo<MxOperand MEMOp>
    : MxPseudo<(outs CCRC:$dst), (ins MEMOp:$src)>;

let mayLoad = 1 in
foreach AM = MxMoveSupportedAMs in {
  def MOV16c # AM : MxMoveToCCR<!cast<MxOpBundle>("MxOp16AddrMode_"#AM).Op,
                                !cast<MxEncMemOp>("MxMoveSrcOpEnc_"#AM)>;
  def MOV8c # AM  : MxMoveToCCRPseudo<!cast<MxOpBundle>("MxOp8AddrMode_"#AM).Op>;
} // foreach AM

// Only data register is allowed.
def MOV16cd : MxMoveToCCR<MxOp16AddrMode_d.Op, MxMoveSrcOpEnc_d>;
def MOV8cd  : MxMoveToCCRPseudo<MxOp8AddrMode_d.Op>;

/// Move from CCR
/// --------------------------------------------------
///  F  E  D  C  B  A  9  8  7  6 | 5  4  3 | 2  1  0
/// --------------------------------------------------
///                                | EFFECTIVE ADDRESS
///  0  1  0  0  0  0  1  0  1  1 |   MODE  |   REG
/// --------------------------------------------------
let Uses = [CCR] in {
class MxMoveFromCCR_R
    : MxInst<(outs MxDRD16:$dst), (ins CCRC:$src), "move.w\t$src, $dst", []>,
      Requires<[ AtLeastM68010 ]> {
  let Inst = (descend 0b0100001011, MxEncAddrMode_d<"dst">.EA);
}

class MxMoveFromCCR_M<MxOperand MEMOp, MxEncMemOp DST_ENC>
    : MxInst<(outs), (ins MEMOp:$dst, CCRC:$src), "move.w\t$src, $dst", []>,
      Requires<[ AtLeastM68010 ]> {
  let Inst = (ascend
    (descend 0b0100001011, DST_ENC.EA),
    DST_ENC.Supplement
  );
}

class MxMoveFromCCRPseudo<MxOperand MEMOp>
    : MxPseudo<(outs), (ins MEMOp:$dst, CCRC:$src)>;
} // let Uses = [CCR]

let mayStore = 1 in
foreach AM = MxMoveSupportedAMs in {
  def MOV16 # AM # c : MxMoveFromCCR_M<!cast<MxOpBundle>("MxOp16AddrMode_"#AM).Op,
                                       !cast<MxEncMemOp>("MxMoveDstOpEnc_"#AM)>;
  def MOV8 # AM # c  : MxMoveFromCCRPseudo<!cast<MxOpBundle>("MxOp8AddrMode_"#AM).Op>;
} // foreach AM

// Only data register is allowed.
def MOV16dc : MxMoveFromCCR_R;
def MOV8dc  : MxMoveFromCCRPseudo<MxOp8AddrMode_d.Op>;

//===----------------------------------------------------------------------===//
// LEA
//===----------------------------------------------------------------------===//

/// ----------------------------------------------------
///  F  E  D  C | B  A  9 | 8  7  6 | 5  4  3 | 2  1  0
/// ----------------------------------------------------
///  0  1  0  0 | DST REG | 1  1  1 |   MODE  |   REG
/// ----------------------------------------------------
class MxLEA<MxOpBundle SRC, MxEncMemOp SRC_ENC>
    : MxInst<(outs MxARD32:$dst), (ins SRC.Op:$src), "lea\t$src, $dst",
             [(set i32:$dst, SRC.Pat:$src)]> {
  let Inst = (ascend
    (descend 0b0100, (operand "$dst", 3), 0b111, SRC_ENC.EA),
    SRC_ENC.Supplement
  );
}

foreach AM = ["p", "f", "b", "q", "k"] in
def LEA32 # AM : MxLEA<!cast<MxOpBundle>("MxOp32AddrMode_"#AM),
                       !cast<MxEncMemOp>("MxMoveSrcOpEnc_"#AM)>;
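// For illustration: `lea (4,%a0), %a1` selects destination register 0b001 and
// the (d16,An) mode/reg pair 0b101/0b000, i.e. opword 0x43E8 followed by the
// extension word 0x0004.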
//===----------------------------------------------------------------------===//
// LINK/UNLK
//===----------------------------------------------------------------------===//

let Uses = [SP], Defs = [SP] in {
let mayStore = 1 in {

def LINK16 : MxInst<(outs), (ins MxARD16:$src, Mxi16imm:$disp),
                    "link.w\t$src, $disp", []> {
  let Inst = (ascend
    (descend 0b0100111001010, (operand "$src", 3)),
    (operand "$disp", 16)
  );
}

def LINK32 : MxInst<(outs), (ins MxARD16:$src, Mxi32imm:$disp),
                    "link.l\t$src, $disp", []> {
  let Inst = (ascend
    (descend 0b0100100000001, (operand "$src", 3)),
    (slice "$disp", 31, 16),
    (slice "$disp", 15, 0)
  );
}

def UNLK : MxInst<(outs), (ins MxARD32:$src), "unlk\t$src", []> {
  let Inst = (descend 0b0100111001011, (operand "$src", 3));
}

} // let mayStore = 1
} // let Uses = [SP], Defs = [SP]

//===----------------------------------------------------------------------===//
// Pseudos
//===----------------------------------------------------------------------===//

/// Push/Pop to/from SP for simplicity
let Uses = [SP], Defs = [SP], hasSideEffects = 0 in {

// SP <- SP - <size>; (SP) <- Dn
let mayStore = 1 in {
def PUSH8d  : MxPseudo<(outs), (ins DR8:$reg)>;
def PUSH16d : MxPseudo<(outs), (ins DR16:$reg)>;
def PUSH32r : MxPseudo<(outs), (ins XR32:$reg)>;
} // let mayStore = 1

// Dn <- (SP); SP <- SP + <size>
let mayLoad = 1 in {
def POP8d  : MxPseudo<(outs DR8:$reg), (ins)>;
def POP16d : MxPseudo<(outs DR16:$reg), (ins)>;
def POP32r : MxPseudo<(outs XR32:$reg), (ins)>;
} // let mayLoad = 1

} // let Uses/Defs = [SP], hasSideEffects = 0

let Defs = [CCR] in {
class MxPseudoMove_RR<MxType DST, MxType SRC, list<dag> PAT = []>
    : MxPseudo<(outs DST.ROp:$dst), (ins SRC.ROp:$src), PAT>;

class MxPseudoMove_RM<MxType DST, MxOperand SRCOpd, list<dag> PAT = []>
    : MxPseudo<(outs DST.ROp:$dst), (ins SRCOpd:$src), PAT>;

// These Pseudos handle loading immediates to registers.
// They are expanded post-RA into either move or moveq instructions,
// depending on size, destination register class, and immediate value.
// This is done with pseudo instructions in order to not constrain RA to
// data registers if moveq matches.
class MxPseudoMove_DI<MxType TYPE>
    : MxPseudo<(outs TYPE.ROp:$dst), (ins TYPE.IOp:$src),
               [(set TYPE.ROp:$dst, imm:$src)]>;

// i8 imm -> reg can always be converted to moveq,
// but we still emit a pseudo for consistency.
def MOVI8di  : MxPseudoMove_DI<MxType8d>;
def MOVI16ri : MxPseudoMove_DI<MxType16r>;
def MOVI32ri : MxPseudoMove_DI<MxType32r>;
} // let Defs = [CCR]

/// This group of pseudos is analogous to the real x86 extending moves, but
/// since M68k does not have those we need to emulate them. These instructions
/// will be expanded right after RA has completed, because we need to know
/// precisely which registers are allocated for the operands: if they overlap
/// we just extend the value, and if the registers are completely different we
/// need to move first.
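// For example, a MOVSXd32d8 whose source and destination end up in the same
// physical register is expanded to just an in-place sign extension, while
// with distinct registers the byte is first moved into the destination and
// then extended there (see the post-RA pseudo expansion in M68kInstrInfo).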
foreach EXT = ["S", "Z"] in {
  let hasSideEffects = 0 in {

    def MOV#EXT#Xd16d8  : MxPseudoMove_RR<MxType16d, MxType8d>;
    def MOV#EXT#Xd32d8  : MxPseudoMove_RR<MxType32d, MxType8d>;
    def MOV#EXT#Xd32d16 : MxPseudoMove_RR<MxType32d, MxType16d>;

    let mayLoad = 1 in {

      def MOV#EXT#Xd16j8  : MxPseudoMove_RM<MxType16d, MxType8.JOp>;
      def MOV#EXT#Xd32j8  : MxPseudoMove_RM<MxType32d, MxType8.JOp>;
      def MOV#EXT#Xd32j16 : MxPseudoMove_RM<MxType32d, MxType16.JOp>;

      def MOV#EXT#Xd16p8  : MxPseudoMove_RM<MxType16d, MxType8.POp>;
      def MOV#EXT#Xd32p8  : MxPseudoMove_RM<MxType32d, MxType8.POp>;
      def MOV#EXT#Xd32p16 : MxPseudoMove_RM<MxType32d, MxType16.POp>;

      def MOV#EXT#Xd16f8  : MxPseudoMove_RM<MxType16d, MxType8.FOp>;
      def MOV#EXT#Xd32f8  : MxPseudoMove_RM<MxType32d, MxType8.FOp>;
      def MOV#EXT#Xd32f16 : MxPseudoMove_RM<MxType32d, MxType16.FOp>;

      def MOV#EXT#Xd16q8  : MxPseudoMove_RM<MxType16d, MxType8.QOp>;
      def MOV#EXT#Xd32q8  : MxPseudoMove_RM<MxType32d, MxType8.QOp>;
      def MOV#EXT#Xd32q16 : MxPseudoMove_RM<MxType32d, MxType16.QOp>;
    }
  }
}

/// This group of instructions is similar to the group above, but it DOES NOT
/// do any value extension: it just loads the smaller register into the lower
/// part of another register if the operands' physical registers are different,
/// or does nothing if they are the same.
def MOVXd16d8  : MxPseudoMove_RR<MxType16d, MxType8d>;
def MOVXd32d8  : MxPseudoMove_RR<MxType32d, MxType8d>;
def MOVXd32d16 : MxPseudoMove_RR<MxType32d, MxType16d>;

//===----------------------------------------------------------------------===//
// Extend/Truncate Patterns
//===----------------------------------------------------------------------===//

// i16 <- sext i8
def: Pat<(i16 (sext i8:$src)),
         (EXTRACT_SUBREG (MOVSXd32d8 MxDRD8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxSExtLoadi16i8 MxCP_ARI:$src),
         (EXTRACT_SUBREG (MOVSXd32j8 MxARI8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxSExtLoadi16i8 MxCP_ARID:$src),
         (EXTRACT_SUBREG (MOVSXd32p8 MxARID8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxSExtLoadi16i8 MxCP_ARII:$src),
         (EXTRACT_SUBREG (MOVSXd32f8 MxARII8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxSExtLoadi16i8 MxCP_PCD:$src), (MOVSXd16q8 MxPCD8:$src)>;

// i32 <- sext i8
def: Pat<(i32 (sext i8:$src)), (MOVSXd32d8 MxDRD8:$src)>;
def: Pat<(MxSExtLoadi32i8 MxCP_ARI :$src), (MOVSXd32j8 MxARI8 :$src)>;
def: Pat<(MxSExtLoadi32i8 MxCP_ARID:$src), (MOVSXd32p8 MxARID8:$src)>;
def: Pat<(MxSExtLoadi32i8 MxCP_ARII:$src), (MOVSXd32f8 MxARII8:$src)>;
def: Pat<(MxSExtLoadi32i8 MxCP_PCD:$src), (MOVSXd32q8 MxPCD8:$src)>;

// i32 <- sext i16
def: Pat<(i32 (sext i16:$src)), (MOVSXd32d16 MxDRD16:$src)>;
def: Pat<(MxSExtLoadi32i16 MxCP_ARI :$src), (MOVSXd32j16 MxARI16 :$src)>;
def: Pat<(MxSExtLoadi32i16 MxCP_ARID:$src), (MOVSXd32p16 MxARID16:$src)>;
def: Pat<(MxSExtLoadi32i16 MxCP_ARII:$src), (MOVSXd32f16 MxARII16:$src)>;
def: Pat<(MxSExtLoadi32i16 MxCP_PCD:$src), (MOVSXd32q16 MxPCD16:$src)>;

// i16 <- zext i8
def: Pat<(i16 (zext i8:$src)),
         (EXTRACT_SUBREG (MOVZXd32d8 MxDRD8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxZExtLoadi16i8 MxCP_ARI:$src),
         (EXTRACT_SUBREG (MOVZXd32j8 MxARI8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxZExtLoadi16i8 MxCP_ARID:$src),
         (EXTRACT_SUBREG (MOVZXd32p8 MxARID8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxZExtLoadi16i8 MxCP_ARII:$src),
         (EXTRACT_SUBREG (MOVZXd32f8 MxARII8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxZExtLoadi16i8 MxCP_PCD :$src), (MOVZXd16q8 MxPCD8 :$src)>;

// i32 <- zext i8
def: Pat<(i32 (zext i8:$src)), (MOVZXd32d8 MxDRD8:$src)>;
def: Pat<(MxZExtLoadi32i8 MxCP_ARI :$src), (MOVZXd32j8 MxARI8 :$src)>;
def: Pat<(MxZExtLoadi32i8 MxCP_ARID:$src), (MOVZXd32p8 MxARID8:$src)>;
def: Pat<(MxZExtLoadi32i8 MxCP_ARII:$src), (MOVZXd32f8 MxARII8:$src)>;
def: Pat<(MxZExtLoadi32i8 MxCP_PCD :$src), (MOVZXd32q8 MxPCD8 :$src)>;

// i32 <- zext i16
def: Pat<(i32 (zext i16:$src)), (MOVZXd32d16 MxDRD16:$src)>;
def: Pat<(MxZExtLoadi32i16 MxCP_ARI :$src), (MOVZXd32j16 MxARI16 :$src)>;
def: Pat<(MxZExtLoadi32i16 MxCP_ARID:$src), (MOVZXd32p16 MxARID16:$src)>;
def: Pat<(MxZExtLoadi32i16 MxCP_ARII:$src), (MOVZXd32f16 MxARII16:$src)>;
def: Pat<(MxZExtLoadi32i16 MxCP_PCD :$src), (MOVZXd32q16 MxPCD16 :$src)>;

// i16 <- anyext i8
def: Pat<(i16 (anyext i8:$src)),
         (EXTRACT_SUBREG (MOVZXd32d8 MxDRD8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxExtLoadi16i8 MxCP_ARI:$src),
         (EXTRACT_SUBREG (MOVZXd32j8 MxARI8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxExtLoadi16i8 MxCP_ARID:$src),
         (EXTRACT_SUBREG (MOVZXd32p8 MxARID8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxExtLoadi16i8 MxCP_ARII:$src),
         (EXTRACT_SUBREG (MOVZXd32f8 MxARII8:$src), MxSubRegIndex16Lo)>;

// i32 <- anyext i8
def: Pat<(i32 (anyext i8:$src)), (MOVZXd32d8 MxDRD8:$src)>;
def: Pat<(MxExtLoadi32i8 MxCP_ARI :$src), (MOVZXd32j8 MxARI8 :$src)>;
def: Pat<(MxExtLoadi32i8 MxCP_ARID:$src), (MOVZXd32p8 MxARID8:$src)>;
def: Pat<(MxExtLoadi32i8 MxCP_ARII:$src), (MOVZXd32f8 MxARII8:$src)>;

// i32 <- anyext i16
def: Pat<(i32 (anyext i16:$src)), (MOVZXd32d16 MxDRD16:$src)>;
def: Pat<(MxExtLoadi32i16 MxCP_ARI :$src), (MOVZXd32j16 MxARI16 :$src)>;
def: Pat<(MxExtLoadi32i16 MxCP_ARID:$src), (MOVZXd32p16 MxARID16:$src)>;
def: Pat<(MxExtLoadi32i16 MxCP_ARII:$src), (MOVZXd32f16 MxARII16:$src)>;

// trunc patterns
def : Pat<(i16 (trunc i32:$src)),
          (EXTRACT_SUBREG MxXRD32:$src, MxSubRegIndex16Lo)>;
def : Pat<(i8 (trunc i32:$src)),
          (EXTRACT_SUBREG MxXRD32:$src, MxSubRegIndex8Lo)>;
def : Pat<(i8 (trunc i16:$src)),
          (EXTRACT_SUBREG MxXRD16:$src, MxSubRegIndex8Lo)>;

//===----------------------------------------------------------------------===//
// FMOVE
//===----------------------------------------------------------------------===//

let Defs = [FPS] in
class MxFMove<string size, dag outs, dag ins, list<dag> pattern,
              string rounding = "">
    : MxInst<outs, ins, "f"#rounding#"move."#size#"\t$src, $dst", pattern> {
  // Only FMOVE uses FPC
  let Uses = !if(!eq(rounding, ""), [FPC], []);

  // FSMOVE and FDMOVE are only available after M68040
  let Predicates = [!if(!eq(rounding, ""), AtLeastM68881, AtLeastM68040)];
}

// FPDR <- FPDR
class MxFMove_FF<string rounding, int size,
                 MxOpBundle Opnd = !cast<MxOpBundle>("MxOp"#size#"AddrMode_fpr")>
    : MxFMove<"x", (outs Opnd.Op:$dst), (ins Opnd.Op:$src),
              [(null_frag)], rounding> {
  let Inst = (ascend
    (descend 0b1111,
      /*COPROCESSOR ID*/0b001,
      0b000,
      /*MODE + REGISTER*/0b000000
    ),
    (descend 0b0,
      /* R/M */0b0,
      0b0,
      /*SOURCE SPECIFIER*/
      (operand "$src", 3),
      /*DESTINATION*/
      (operand "$dst", 3),
      /*OPMODE*/
      !cond(!eq(rounding, "s"): 0b1000000,
            !eq(rounding, "d"): 0b1000100,
            true: 0b0000000)
    )
  );
}

foreach rounding = ["", "s", "d"] in {
  def F # !toupper(rounding) # MOV80fp_fp : MxFMove_FF<rounding, 80>;

  // We don't have `fmove.s` or `fmove.d` because values will be converted to
  // f80 upon storing into the register, but FMOV32/64fp_fp are still needed
  // to make codegen easier.
  let isCodeGenOnly = true in
  foreach size = [32, 64] in
  def F # !toupper(rounding) # MOV # size # fp_fp : MxFMove_FF<rounding, size>;
}

// Direction
defvar MxFMove_FP_EA = false;
defvar MxFMove_EA_FP = true;

// Encoding scheme for FPSYS <-> R/M
class MxEncFSysMove<bit dir, MxEncMemOp EAEnc, string fsys_reg> {
  dag Value = (ascend
    (descend 0b1111,
      /*COPROCESSOR ID*/0b001,
      0b000,
      /*MODE + REGISTER*/
      EAEnc.EA
    ),
    (descend 0b10,
      /*dir*/ dir,
      /*REGISTER SELECT*/
      (operand "$"#fsys_reg, 3, (encoder "encodeFPSYSSelect")),
      0b0000000000
    )
  );
}

// FPSYS <-> R
class MxFMove_FSYS_R<string src_reg,
                     MxOpBundle SrcOpnd = !cast<MxOpBundle>("MxOp32AddrMode_"#src_reg),
                     MxOpBundle DstOpnd = !cond(!eq(src_reg, "d"): MxOp32AddrMode_fpcs,
                                                !eq(src_reg, "a"): MxOp32AddrMode_fpi),
                     MxEncMemOp SrcEnc = !cast<MxEncMemOp>("MxMoveSrcOpEnc_"#src_reg)>
    : MxFMove<"l", (outs DstOpnd.Op:$dst), (ins SrcOpnd.Op:$src),
              [(null_frag)]> {
  let Inst = MxEncFSysMove<MxFMove_EA_FP, SrcEnc, "dst">.Value;
}

class MxFMove_R_FSYS<string dst_reg,
                     MxOpBundle SrcOpnd = !cond(!eq(dst_reg, "d"): MxOp32AddrMode_fpcs,
                                                !eq(dst_reg, "a"): MxOp32AddrMode_fpi),
                     MxOpBundle DstOpnd = !cast<MxOpBundle>("MxOp32AddrMode_"#dst_reg),
                     MxEncMemOp DstEnc = !cast<MxEncMemOp>("MxMoveDstOpEnc_"#dst_reg)>
    : MxFMove<"l", (outs DstOpnd.Op:$dst), (ins SrcOpnd.Op:$src),
              [(null_frag)]> {
  let Inst = MxEncFSysMove<MxFMove_FP_EA, DstEnc, "src">.Value;
}

def FMOVE32fpcs_d : MxFMove_FSYS_R<"d">;
def FMOVE32d_fpcs : MxFMove_R_FSYS<"d">;
def FMOVE32fpi_a  : MxFMove_FSYS_R<"a">;
def FMOVE32a_fpi  : MxFMove_R_FSYS<"a">;