package xiangshan.backend.decode

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.util.uintToBitPat
import utility._
import utils._
import xiangshan._
import xiangshan.backend.Bundles.{DecodedInst, DynInst, StaticInst}
import xiangshan.backend.fu.FuType
import xiangshan.backend.fu.vector.Bundles._
import xiangshan.backend.decode.isa.bitfield.{InstVType, XSInstBitFields, OPCODE7Bit}
import xiangshan.backend.decode.Zvbb._

/**
 * Checks whether a vector register number is aligned to its effective register
 * group size (EMUL).
 *
 * `emul` uses a 3-bit biased encoding (see the `LMUL` computation in
 * [[VecExceptionGen]]): "b100" = 1 reg, "b101" = 2 regs, "b110" = 4 regs,
 * "b111" = 8 regs. Fractional EMULs ("b0xx") always need only one register and
 * are therefore always aligned. Returns true when `reg` is NOT a multiple of
 * the group size, i.e. the encoding is illegal per the RVV spec.
 */
object RegNumNotAlign {
  def apply(reg: UInt, emul: UInt): Bool = {
    // group of 2 -> low 1 bit must be 0; group of 4 -> low 2 bits; group of 8 -> low 3 bits
    emul === "b101".U && reg(0) =/= 0.U || emul === "b110".U && reg(1, 0) =/= 0.U || emul === "b111".U && reg(2, 0) =/= 0.U
  }
}

/**
 * Maps the NF field of whole-register move/load instructions (legal values
 * 0/1/3/7, encoding 1/2/4/8 registers) to the biased EMUL encoding used by
 * [[RegNumNotAlign]] / [[LmultoRegNum]]: 4 -> 1 reg, 5 -> 2, 6 -> 4, 7 -> 8.
 * Other NF values are not in the lookup list (illegal encodings).
 */
object NFtoLmul {
  def apply(nf: UInt): UInt = {
    LookupTree(nf, List(
      "b000".U -> 4.U,
      "b001".U -> 5.U,
      "b011".U -> 6.U,
      "b111".U -> 7.U
    ))
  }
}

/**
 * Converts a biased-encoded LMUL/EMUL ("b1xx" = integer multiple 2^xx,
 * "b0xx" = fractional) to the number of architectural registers the group
 * occupies. Fractional LMULs still occupy one full register.
 */
object LmultoRegNum {
  def apply(lmul: UInt): UInt = {
    // Integer LMUL: count = 2^lmul(1,0); fractional LMUL: count = 1.
    val numPow = Mux(lmul(2).asBool, lmul(1, 0), 0.U(2.W))
    val regNum = 1.U << numPow
    regNum
  }
}

/**
 * Detects illegal vector instructions that depend on the dynamic vtype/vstart
 * state, per the RISC-V "V" extension rules. The decode stage cannot reject
 * these statically because vtype is runtime CSR state.
 *
 * Inputs: the raw 32-bit instruction, its decoded form, and current
 * vtype/vstart. Output: `illegalInst`, asserted when any of the checks below
 * (1-8 in the section comments) fires.
 */
class VecExceptionGen(implicit p: Parameters) extends XSModule{
  val io = IO(new Bundle(){
    val inst = Input(UInt(32.W))
    val decodedInst = Input(new DecodedInst)
    val vtype = Input(new VType)
    val vstart = Input(Vl())

    val illegalInst = Output(Bool())
  })

  private val inst: XSInstBitFields = io.inst.asTypeOf(new XSInstBitFields)
  private val isVArithMem = FuType.isVArithMem(io.decodedInst.fuType)
  private val isVArith = FuType.isVArith(io.decodedInst.fuType)
  private val isVset = FuType.isVset(io.decodedInst.fuType)

  private val SEW = io.vtype.vsew(1, 0)
  // Biased LMUL encoding: invert the sign bit of vlmul so that
  // "b100" = LMUL 1, "b101" = 2, "b110" = 4, "b111" = 8, "b0xx" = fractional.
  // This makes LMUL comparisons/arithmetic below monotonic in the multiplier.
  private val LMUL = Cat(~io.vtype.vlmul(2), io.vtype.vlmul(1, 0))

  // ---- Instruction classification (one-hot match against the raw encoding) ----

  // Unit-stride / strided / fault-only-first loads and stores
  private val lsStrideInst = Seq(
    VLE8_V, VLE16_V, VLE32_V, VLE64_V, VSE8_V, VSE16_V, VSE32_V, VSE64_V,
    VLSE8_V, VLSE16_V, VLSE32_V, VLSE64_V, VSSE8_V, VSSE16_V, VSSE32_V, VSSE64_V,
    VLE8FF_V, VLE16FF_V, VLE32FF_V, VLE64FF_V
  ).map(_ === inst.ALL).reduce(_ || _)

  // Mask load/store (EEW=1, always a single register)
  private val lsMaskInst = Seq(
    VLM_V, VSM_V
  ).map(_ === inst.ALL).reduce(_ || _)

  // Indexed (gather/scatter) loads and stores
  private val lsIndexInst = Seq(
    VLUXEI8_V, VLUXEI16_V, VLUXEI32_V, VLUXEI64_V, VLOXEI8_V, VLOXEI16_V, VLOXEI32_V, VLOXEI64_V,
    VSUXEI8_V, VSUXEI16_V, VSUXEI32_V, VSUXEI64_V, VSOXEI8_V, VSOXEI16_V, VSOXEI32_V, VSOXEI64_V
  ).map(_ === inst.ALL).reduce(_ || _)

  // Whole-register loads/stores (do not depend on vtype)
  private val lsWholeInst = Seq(
    VL1RE8_V, VL1RE16_V, VL1RE32_V, VL1RE64_V,
    VL2RE8_V, VL2RE16_V, VL2RE32_V, VL2RE64_V,
    VL4RE8_V, VL4RE16_V, VL4RE32_V, VL4RE64_V,
    VL8RE8_V, VL8RE16_V, VL8RE32_V, VL8RE64_V,
    VS1R_V, VS2R_V, VS4R_V, VS8R_V
  ).map(_ === inst.ALL).reduce(_ || _)

  // Instructions whose destination is 2*SEW / 2*LMUL (vd widening)
  private val vdWideningInst = Seq(
    //int
    VWADD_VV, VWADD_VX, VWADD_WV, VWADD_WX, VWADDU_VV, VWADDU_VX, VWADDU_WV, VWADDU_WX,
    VWMACC_VV, VWMACC_VX, VWMACCSU_VV, VWMACCSU_VX, VWMACCU_VV, VWMACCU_VX, VWMACCUS_VX,
    VWMUL_VV, VWMUL_VX, VWMULSU_VV, VWMULSU_VX, VWMULU_VV, VWMULU_VX,
    VWSUB_VV, VWSUB_VX, VWSUB_WV, VWSUB_WX, VWSUBU_VV, VWSUBU_VX, VWSUBU_WV, VWSUBU_WX,
    //fp
    VFWADD_VF, VFWADD_VV, VFWADD_WF, VFWADD_WV, VFWSUB_VF, VFWSUB_VV, VFWSUB_WF, VFWSUB_WV,
    VFWMUL_VF, VFWMUL_VV,
    VFWMACC_VF, VFWMACC_VV, VFWMSAC_VF, VFWMSAC_VV, VFWNMACC_VF, VFWNMACC_VV, VFWNMSAC_VF, VFWNMSAC_VV,
    VFWCVT_F_F_V, VFWCVT_F_X_V, VFWCVT_F_XU_V, VFWCVT_RTZ_X_F_V, VFWCVT_RTZ_XU_F_V, VFWCVT_X_F_V, VFWCVT_XU_F_V,
    // zvbb
    VWSLL_VV, VWSLL_VX, VWSLL_VI,
  ).map(_ === inst.ALL).reduce(_ || _)

  // ".w" forms: vs2 is already the wide (2*SEW) operand
  private val vs2WideningInst = Seq(
    //int
    VWADD_WV, VWADD_WX, VWADDU_WV, VWADDU_WX,
    VWSUB_WV, VWSUB_WX, VWSUBU_WV, VWSUBU_WX,
    //fp
    VFWADD_WF, VFWADD_WV, VFWSUB_WF, VFWSUB_WV
  ).map(_ === inst.ALL).reduce(_ || _)

  // Narrowing instructions: vs2 is 2*SEW, vd is SEW
  private val narrowingInst = Seq(
    //int
    VNCLIP_WI, VNCLIP_WV, VNCLIP_WX, VNCLIPU_WI, VNCLIPU_WV, VNCLIPU_WX,
    VNSRA_WI, VNSRA_WV, VNSRA_WX, VNSRL_WI, VNSRL_WV, VNSRL_WX,
    //fp
    VFNCVT_F_F_W, VFNCVT_F_X_W, VFNCVT_F_XU_W, VFNCVT_ROD_F_F_W, VFNCVT_RTZ_X_F_W, VFNCVT_RTZ_XU_F_W, VFNCVT_X_F_W, VFNCVT_XU_F_W
  ).map(_ === inst.ALL).reduce(_ || _)

  // Integer extension: vs2 EEW is SEW/2, SEW/4 or SEW/8
  private val intExtInst = Seq(
    VSEXT_VF2, VSEXT_VF4, VSEXT_VF8, VZEXT_VF2, VZEXT_VF4, VZEXT_VF8
  ).map(_ === inst.ALL).reduce(_ || _)

  // Add/subtract-with-carry producing a mask destination
  private val acsbInst = Seq(
    VMADC_VI, VMADC_VIM, VMADC_VV, VMADC_VVM, VMADC_VX, VMADC_VXM,
    VMSBC_VV, VMSBC_VVM, VMSBC_VX, VMSBC_VXM
  ).map(_ === inst.ALL).reduce(_ || _)

  // Compare instructions producing a mask destination
  private val cmpInst = Seq(
    //int
    VMSEQ_VI, VMSEQ_VV, VMSEQ_VX,
    VMSGT_VI, VMSGT_VX, VMSGTU_VI, VMSGTU_VX,
    VMSLE_VI, VMSLE_VV, VMSLE_VX, VMSLEU_VI, VMSLEU_VV, VMSLEU_VX,
    VMSLT_VV, VMSLT_VX, VMSLTU_VV, VMSLTU_VX,
    VMSNE_VI, VMSNE_VV, VMSNE_VX,
    //fp
    VMFEQ_VF, VMFEQ_VV, VMFNE_VF, VMFNE_VV,
    VMFGE_VF, VMFGT_VF, VMFLE_VF, VMFLE_VV, VMFLT_VF, VMFLT_VV
  ).map(_ === inst.ALL).reduce(_ || _)

  // Single-width reductions: vs1/vd hold a single element
  private val redInst = Seq(
    VREDAND_VS, VREDMAX_VS, VREDMAXU_VS, VREDMIN_VS, VREDMINU_VS, VREDOR_VS, VREDSUM_VS, VREDXOR_VS,
    VFREDMAX_VS, VFREDMIN_VS, VFREDOSUM_VS, VFREDUSUM_VS
  ).map(_ === inst.ALL).reduce(_ || _)

  // Widening reductions: result element is 2*SEW
  private val redWideningInst = Seq(
    VWREDSUM_VS, VWREDSUMU_VS,
    VFWREDOSUM_VS, VFWREDUSUM_VS
  ).map(_ === inst.ALL).reduce(_ || _)

  // Mask-register logical ops (operate on whole mask registers, unmasked only)
  private val maskLogicalInst = Seq(
    VMAND_MM, VMNAND_MM, VMANDN_MM, VMXOR_MM, VMOR_MM, VMNOR_MM, VMORN_MM, VMXNOR_MM
  ).map(_ === inst.ALL).reduce(_ || _)

  private val maskArithmeticInst = Seq(
    VCPOP_M, VFIRST_M, VMSBF_M, VMSIF_M, VMSOF_M
  ).map(_ === inst.ALL).reduce(_ || _) || maskLogicalInst

  // viota/vid: mask-typed vs2 (viota) with a normal vector destination
  private val maskIndexInst = Seq(
    VIOTA_M, VID_V
  ).map(_ === inst.ALL).reduce(_ || _)

  // Scalar <-> element-0 moves
  private val vmvSingleInst = Seq(
    VMV_X_S, VMV_S_X, VFMV_F_S, VFMV_S_F
  ).map(_ === inst.ALL).reduce(_ || _)

  // Whole-register moves (register count encoded in IMM5_OPIVI(2,0))
  private val vmvWholeInst = Seq(
    VMV1R_V, VMV2R_V, VMV4R_V, VMV8R_V
  ).map(_ === inst.ALL).reduce(_ || _)

  private val vrgather16 = VRGATHEREI16_VV === inst.ALL
  private val vcompress = VCOMPRESS_VM === inst.ALL
  private val intExt2 = Seq(VSEXT_VF2, VZEXT_VF2).map(_ === inst.ALL).reduce(_ || _)
  private val intExt4 = Seq(VSEXT_VF4, VZEXT_VF4).map(_ === inst.ALL).reduce(_ || _)
  private val intExt8 = Seq(VSEXT_VF8, VZEXT_VF8).map(_ === inst.ALL).reduce(_ || _)

  // vset* and whole-register accesses execute regardless of vtype.vill
  private val notDependVtypeInst = Seq(VSETVLI, VSETIVLI, VSETVL).map(_ === inst.ALL).reduce(_ || _) || lsWholeInst


  // 1. inst Illegal: mask-logical ops have no masked form, so vm must be 1
  private val instIllegal = maskLogicalInst && inst.VM === 0.U

  // 2. vill Illegal: vtype.vill set while executing a vtype-dependent instruction
  private val villIllegal = io.vtype.illegal && isVArithMem && !notDependVtypeInst

  // 3. EEW Illegal
  // FP converts whose FP side is 2*SEW: legal even when SEW==8b (FP16 result/source)
  private val doubleFpInst = Seq(
    VFWCVT_F_X_V, VFWCVT_F_XU_V, VFNCVT_RTZ_X_F_W, VFNCVT_RTZ_XU_F_W, VFNCVT_X_F_W, VFNCVT_XU_F_W
  ).map(_ === inst.ALL).reduce(_ || _)

  // funct3 of OPFVV is 001, funct3 of OPFVF is 101
  private val isFp = (inst.FUNCT3 === BitPat("b?01")) && (inst.OPCODE7Bit === OPCODE7Bit.VECTOR_ARITH)
  // FP ops need SEW >= 16 (no FP8), except the double-width converts above
  private val fpEewIllegal = isFp && ((SEW === 0.U) && !doubleFpInst)

  // Source EEW = SEW / 2,4,8 must stay >= 8 bits
  private val intExtEewIllegal = intExt2 && SEW === 0.U ||
                           intExt4 && SEW <= 1.U ||
                           intExt8 && SEW <= 2.U

  // Widening/narrowing needs 2*SEW <= 64, so SEW == 64 is illegal
  private val wnEewIllegal = (vdWideningInst || narrowingInst || redWideningInst) && SEW === 3.U

  private val eewIllegal = fpEewIllegal || intExtEewIllegal || wnEewIllegal

  // 4. EMUL Illegal
  // EMUL = LMUL * EEW / SEW must lie in [1/8, 8]. With the biased encodings,
  // that is: SEW - 3 <= LMUL + WIDTH - SEW' ... expressed here as bounds on
  // LMUL + width relative to SEW (widening adds +& to avoid truncation).
  private val lsEmulIllegal = (lsStrideInst || lsIndexInst) && (LMUL +& inst.WIDTH(1, 0) < SEW +& 1.U || LMUL +& inst.WIDTH(1, 0) > SEW +& 7.U)

  // Source EMUL = LMUL / 2,4,8 must stay >= 1/8 (biased LMUL >= 1,2,3 resp. is illegal boundary)
  private val intExtEmulIllegal = intExt2 && LMUL === 1.U ||
                            intExt4 && LMUL <= 2.U ||
                            intExt8 && LMUL <= 3.U

  // 2*LMUL would exceed 8 when LMUL is already 8 ("b111")
  private val wnEmulIllegal = (vdWideningInst || narrowingInst) && LMUL === 7.U

  // vrgatherei16: index EEW is fixed at 16, so index EMUL = LMUL * 16 / SEW must be in range
  private val gather16EmulIllegal = vrgather16 && (LMUL < SEW || LMUL > SEW +& 6.U)

  // Segment load/store: NFIELDS * EMUL registers, group must fit in <= 8 regs and not wrap past v31
  private val NFIELDS = inst.NF +& 1.U
  // Indexed segments use data EMUL = LMUL; others use LMUL * (mem EEW / SEW)
  private val segEmul = Mux(lsIndexInst, LMUL, LMUL +& inst.WIDTH(1, 0) - SEW)
  private val emulNumPow = Mux(segEmul(2), segEmul(1, 0), 0.U(2.W))
  private val segRegNum = NFIELDS << emulNumPow
  private val segRegMax = inst.VD +& segRegNum

  private val lsSegIllegal = (lsStrideInst || lsIndexInst) && inst.NF =/= 0.U && (segRegNum > 8.U || segRegMax > 32.U)

  private val emulIllegal = lsEmulIllegal || intExtEmulIllegal || wnEmulIllegal || gather16EmulIllegal || lsSegIllegal

  // 5. Reg Number Align: each operand's register number must be a multiple of its EMUL
  private val vs1IsMask = maskArithmeticInst || vcompress
  private val vs1IsSingleElem = redInst || redWideningInst
  private val vs1Eew = Mux(vrgather16, "b01".U, SEW)
  // Mask/single-element operands occupy exactly one register ("b100" = EMUL 1)
  private val vs1Emul = Mux(vs1IsMask || vs1IsSingleElem, "b100".U, Mux(vrgather16, LMUL +& 1.U - SEW, LMUL))
  private val vs1NotAlign = SrcType.isVp(io.decodedInst.srcType(0)) && RegNumNotAlign(inst.VS1, vs1Emul)

  private val vs2IsMask = maskArithmeticInst || maskIndexInst
  private val vs2IsSingleElem = vmvSingleInst
  // One-hot selector: indexed-ls / widening-or-narrowing / ext2 / ext4 / ext8
  private val vs2EewSel = Cat(lsIndexInst, (vs2WideningInst || narrowingInst || redWideningInst), intExt2, intExt4, intExt8)
  private val vs2Eew = LookupTreeDefault(vs2EewSel, SEW, List(
    "b10000".U -> inst.WIDTH(1, 0),
    "b01000".U -> (SEW + 1.U),
    "b00100".U -> (SEW - 1.U),
    "b00010".U -> (SEW - 2.U),
    "b00001".U -> (SEW - 3.U)
  ))
  private val vs2EmulSel = Cat((vs2IsMask || vs2IsSingleElem), (vs2WideningInst || narrowingInst), vmvWholeInst, (intExtInst || lsIndexInst))
  private val vs2Emul = LookupTreeDefault(vs2EmulSel, LMUL, List(
    "b1000".U -> "b100".U,
    "b0100".U -> (LMUL + 1.U),
    "b0010".U -> NFtoLmul(inst.IMM5_OPIVI(2, 0)),
    "b0001".U -> (LMUL +& vs2Eew - SEW)
  ))
  private val vs2NotAlign = SrcType.isVp(io.decodedInst.srcType(1)) && RegNumNotAlign(inst.VS2, vs2Emul)

  private val vdIsMask = lsMaskInst || acsbInst || cmpInst || maskArithmeticInst
  private val vdIsSingleElem = redInst || redWideningInst || vmvSingleInst
  private val vdEew = Mux(lsStrideInst, inst.WIDTH(1, 0), Mux(vdWideningInst || redWideningInst, SEW + 1.U, SEW))
  private val vdEmulSel = Cat((vdIsMask || vdIsSingleElem), vdWideningInst, vmvWholeInst, lsWholeInst, lsStrideInst)
  private val vdEmul = LookupTreeDefault(vdEmulSel, LMUL, List(
    "b10000".U -> "b100".U,
    "b01000".U -> (LMUL + 1.U),
    "b00100".U -> NFtoLmul(inst.IMM5_OPIVI(2, 0)),
    "b00010".U -> NFtoLmul(inst.NF),
    "b00001".U -> (LMUL +& vdEew - SEW)
  ))
  private val vdNotAlign = (SrcType.isVp(io.decodedInst.srcType(2)) || io.decodedInst.vecWen) && RegNumNotAlign(inst.VD, vdEmul)

  private val regNumIllegal = isVArithMem && (vs1NotAlign || vs2NotAlign || vdNotAlign)

  // 6. v0 Overlap: a masked instruction may not write v0 unless vd is itself a
  //    mask/single-element destination (spec allows mask-producing ops to overlap v0),
  //    except vmsbf/vmsif/vmsof which forbid vd == v0 when masked
  private val v0AllowOverlap = (vdIsMask || vdIsSingleElem) && !Seq(VMSBF_M, VMSIF_M, VMSOF_M).map(_ === inst.ALL).reduce(_ || _)
  private val v0Overlap = isVArithMem && io.decodedInst.vecWen && inst.VM === 0.U && inst.VD === 0.U && !v0AllowOverlap

  // 7. Src Reg Overlap: compute each operand's [lo, hi] register span
  private val vs1RegLo = inst.VS1
  private val vs1RegHi = inst.VS1 +& LmultoRegNum(vs1Emul) - 1.U
  private val vs2RegLo = inst.VS2
  private val vs2RegHi = inst.VS2 +& LmultoRegNum(vs2Emul) - 1.U
  private val vdRegLo = inst.VD
  // NOTE(review): vdRegHi uses non-widening `+` while vs1/vs2 use `+&`; overflow
  // here would wrap — presumably unreachable because segRegMax/alignment checks
  // reject such encodings first. Confirm before relying on it.
  private val vdRegHi = Mux(lsStrideInst || lsIndexInst, segRegMax - 1.U, inst.VD + LmultoRegNum(vdEmul) - 1.U)

  // Instructions where any vd/vs overlap is illegal regardless of EEW relations
  private val notAllowOverlapInst = lsIndexInst && inst.NF =/= 0.U || Seq(VMSBF_M, VMSIF_M, VMSOF_M, VIOTA_M,
    VSLIDEUP_VX, VSLIDEUP_VI, VSLIDE1UP_VX, VFSLIDE1UP_VF, VRGATHER_VV, VRGATHEREI16_VV, VRGATHER_VX, VRGATHER_VI, VCOMPRESS_VM).map(_ === inst.ALL).reduce(_ || _)

  // 8. vstart Illegal: vector arithmetic requires vstart == 0
  private val vstartIllegal = isVArith && (io.vstart =/= 0.U)

  //vs1
  private val vs1vdRegNotOverlap = vs1RegHi < vdRegLo || vdRegHi < vs1RegLo
  // Constraint 1: same EEW (or both masks) -> full overlap allowed
  private val vs1Constraint1 = vs1IsMask && vdIsMask || !vs1IsMask && !vdIsMask && vs1Eew === vdEew
  // Constraint 2: dest EEW smaller -> overlap only at the low end of the source group
  private val vs1Constraint2 = (vdIsMask && !vs1IsMask || !vs1IsMask && !vdIsMask && vs1Eew > vdEew) && vdRegLo === vs1RegLo && vdRegHi <= vs1RegHi
  // Constraint 3: dest EEW larger -> overlap only at the high end, and source EMUL >= 1
  private val vs1Constraint3 = (!vdIsMask && vs1IsMask || !vs1IsMask && !vdIsMask && vs1Eew < vdEew) && vs1Emul >= "b100".U && vdRegHi === vs1RegHi && vdRegLo <= vs1RegLo
  private val vs1AllowOverlap = (vs1Constraint1 || vs1Constraint2 || vs1Constraint3 || vdIsSingleElem) && !notAllowOverlapInst
  private val vs1vdOverlap = (SrcType.isVp(io.decodedInst.srcType(0)) && io.decodedInst.vecWen) && !vs1vdRegNotOverlap && !vs1AllowOverlap
  //vs2 (same three constraints as vs1)
  private val vs2vdRegNotOverlap = vs2RegHi < vdRegLo || vdRegHi < vs2RegLo
  private val vs2Constraint1 = vs2IsMask && vdIsMask || !vs2IsMask && !vdIsMask && vs2Eew === vdEew
  private val vs2Constraint2 = (vdIsMask && !vs2IsMask || !vs2IsMask && !vdIsMask && vs2Eew > vdEew) && vdRegLo === vs2RegLo && vdRegHi <= vs2RegHi
  private val vs2Constraint3 = (!vdIsMask && vs2IsMask || !vs2IsMask && !vdIsMask && vs2Eew < vdEew) && vs2Emul >= "b100".U && vdRegHi === vs2RegHi && vdRegLo <= vs2RegLo
  private val vs2AllowOverlap = (vs2Constraint1 || vs2Constraint2 || vs2Constraint3 || vdIsSingleElem) && !notAllowOverlapInst
  private val vs2vdOverlap = (SrcType.isVp(io.decodedInst.srcType(1)) && io.decodedInst.vecWen) && !vs2vdRegNotOverlap && !vs2AllowOverlap

  private val regOverlapIllegal = v0Overlap || vs1vdOverlap || vs2vdOverlap

  io.illegalInst := instIllegal || villIllegal || eewIllegal || emulIllegal || regNumIllegal || regOverlapIllegal || vstartIllegal
  // Keep intermediate signals visible in the emitted design for waveform debug
  dontTouch(instIllegal)
  dontTouch(villIllegal)
  dontTouch(eewIllegal)
  dontTouch(emulIllegal)
  dontTouch(regNumIllegal)
  dontTouch(regOverlapIllegal)
  dontTouch(notDependVtypeInst)
  dontTouch(vstartIllegal)
}