xref: /XiangShan/src/main/scala/xiangshan/backend/decode/VecExceptionGen.scala (revision bb2f3f51dd67f6e16e0cc1ffe43368c9fc7e4aef)
1package xiangshan.backend.decode
2
3import org.chipsalliance.cde.config.Parameters
4import chisel3._
5import chisel3.util._
6import freechips.rocketchip.rocket.Instructions._
7import freechips.rocketchip.util.uintToBitPat
8import utility._
9import utils._
10import xiangshan._
11import xiangshan.backend.Bundles.{DecodedInst, DynInst, StaticInst}
12import xiangshan.backend.fu.FuType
13import xiangshan.backend.fu.vector.Bundles._
14import xiangshan.backend.decode.isa.bitfield.{InstVType, XSInstBitFields}
15
object RegNumNotAlign {
  /**
    * Register-group alignment check.
    *
    * `emul` is the biased EMUL code used throughout decode (4 = one
    * register, 5/6/7 = groups of 2/4/8 registers). A group of 2^k
    * registers must start at a register index that is a multiple of 2^k.
    *
    * @param reg  5-bit vector register index (vd/vs1/vs2)
    * @param emul biased EMUL code of the register group
    * @return true when `reg` is NOT properly aligned for the group size
    */
  def apply(reg: UInt, emul: UInt): Bool = {
    // (biased code, number of low index bits that must be zero)
    Seq(
      ("b101".U, 1), // group of 2 registers
      ("b110".U, 2), // group of 4 registers
      ("b111".U, 3)  // group of 8 registers
    ).map { case (code, lowBits) =>
      emul === code && reg(lowBits - 1, 0) =/= 0.U
    }.reduce(_ || _)
  }
}
21
object NFtoLmul {
  /**
    * Converts a register-count encoding to the biased LMUL code.
    *
    * `nf` comes from the NF field of whole-register loads/stores or the
    * low 3 bits of the vmv<nr>r.v immediate and is restricted to
    * 0/1/3/7 (meaning 1/2/4/8 registers). The result is the biased
    * LMUL code 4/5/6/7 used by the alignment/overlap checks.
    * Other `nf` values are reserved encodings and not mapped here.
    */
  def apply(nf: UInt): UInt = {
    val table = Seq(
      0 -> 4, // 1 register
      1 -> 5, // 2 registers
      3 -> 6, // 4 registers
      7 -> 7  // 8 registers
    ).map { case (n, lmul) => n.U(3.W) -> lmul.U(3.W) }
    LookupTree(nf, table)
  }
}
32
object LmultoRegNum {
  /**
    * Number of vector registers occupied by a group with the given
    * biased LMUL code.
    *
    * Codes with bit 2 set (LMUL >= 1) occupy 2^lmul(1,0) registers;
    * fractional codes (bit 2 clear) still occupy a single register.
    */
  def apply(lmul: UInt): UInt = {
    Mux(lmul(2).asBool, 1.U << lmul(1, 0), 1.U)
  }
}
40
// Decode-stage illegal-instruction detection for RISC-V Vector (RVV)
// instructions. Eight independent checks — reserved encodings, vill,
// EEW legality, EMUL legality, register-group alignment, v0 overlap,
// source/destination group overlap, and nonzero vstart — are OR-ed into
// the single io.illegalInst flag.
41class VecExceptionGen(implicit p: Parameters) extends XSModule{
42  val io = IO(new Bundle(){
43    val inst = Input(UInt(32.W))
44    val decodedInst = Input(new DecodedInst)
45    val vtype = Input(new VType)
46    val vstart = Input(Vl())
47
48    val illegalInst = Output(Bool())
49  })
50
  // Raw 32-bit instruction viewed through the RVV bit-field names
  // (VD / VS1 / VS2 / VM / NF / WIDTH / IMM5_OPIVI / ALL ...).
51  private val inst: XSInstBitFields = io.inst.asTypeOf(new XSInstBitFields)
52  private val isVArithMem = FuType.isVArithMem(io.decodedInst.fuType)
53  private val isVArith = FuType.isVArith(io.decodedInst.fuType)
  // NOTE(review): isVset is never used below — candidate for removal.
54  private val isVset = FuType.isVset(io.decodedInst.fuType)
55
  // SEW code: 0/1/2/3 -> 8/16/32/64-bit elements.
56  private val SEW = io.vtype.vsew(1, 0)
  // LMUL as a biased unsigned code: flipping vlmul's sign bit maps
  // 1..7 -> 1/8, 1/4, 1/2, 1, 2, 4, 8 (code 4 == LMUL 1). In this
  // encoding, adding/subtracting codes multiplies/divides the group
  // multiplier by powers of two, which the EMUL math below relies on.
57  private val LMUL = Cat(~io.vtype.vlmul(2), io.vtype.vlmul(1, 0))
58
  // Unit-stride, strided and fault-only-first loads/stores (EEW from WIDTH).
59  private val lsStrideInst = Seq(
60    VLE8_V, VLE16_V, VLE32_V, VLE64_V, VSE8_V, VSE16_V, VSE32_V, VSE64_V,
61    VLSE8_V, VLSE16_V, VLSE32_V, VLSE64_V, VSSE8_V, VSSE16_V, VSSE32_V, VSSE64_V,
62    VLE8FF_V, VLE16FF_V, VLE32FF_V, VLE64FF_V
63  ).map(_ === inst.ALL).reduce(_ || _)
64
  // Mask load/store (vlm.v / vsm.v).
65  private val lsMaskInst = Seq(
66    VLM_V, VSM_V
67  ).map(_ === inst.ALL).reduce(_ || _)
68
  // Indexed (ordered/unordered) loads/stores; index EEW from WIDTH, data EEW = SEW.
69  private val lsIndexInst = Seq(
70    VLUXEI8_V, VLUXEI16_V, VLUXEI32_V, VLUXEI64_V, VLOXEI8_V, VLOXEI16_V, VLOXEI32_V, VLOXEI64_V,
71    VSUXEI8_V, VSUXEI16_V, VSUXEI32_V, VSUXEI64_V, VSOXEI8_V, VSOXEI16_V, VSOXEI32_V, VSOXEI64_V
72  ).map(_ === inst.ALL).reduce(_ || _)
73
  // Whole-register loads/stores; register count comes from NF, not vtype.
74  private val lsWholeInst = Seq(
75    VL1RE8_V, VL1RE16_V, VL1RE32_V, VL1RE64_V,
76    VL2RE8_V, VL2RE16_V, VL2RE32_V, VL2RE64_V,
77    VL4RE8_V, VL4RE16_V, VL4RE32_V, VL4RE64_V,
78    VL8RE8_V, VL8RE16_V, VL8RE32_V, VL8RE64_V,
79    VS1R_V, VS2R_V, VS4R_V, VS8R_V
80  ).map(_ === inst.ALL).reduce(_ || _)
81
  // Widening ops: destination vd is double-width (EEW = 2*SEW, EMUL = 2*LMUL).
82  private val vdWideningInst = Seq(
83    //int
84    VWADD_VV, VWADD_VX, VWADD_WV, VWADD_WX, VWADDU_VV, VWADDU_VX, VWADDU_WV, VWADDU_WX,
85    VWMACC_VV, VWMACC_VX, VWMACCSU_VV, VWMACCSU_VX, VWMACCU_VV, VWMACCU_VX, VWMACCUS_VX,
86    VWMUL_VV, VWMUL_VX, VWMULSU_VV, VWMULSU_VX, VWMULU_VV, VWMULU_VX,
87    VWSUB_VV, VWSUB_VX, VWSUB_WV, VWSUB_WX, VWSUBU_VV, VWSUBU_VX, VWSUBU_WV, VWSUBU_WX,
88    //fp
89    VFWADD_VF, VFWADD_VV, VFWADD_WF, VFWADD_WV, VFWSUB_VF, VFWSUB_VV, VFWSUB_WF, VFWSUB_WV,
90    VFWMUL_VF, VFWMUL_VV,
91    VFWMACC_VF, VFWMACC_VV, VFWMSAC_VF, VFWMSAC_VV, VFWNMACC_VF, VFWNMACC_VV, VFWNMSAC_VF, VFWNMSAC_VV,
92    VFWCVT_F_F_V, VFWCVT_F_X_V, VFWCVT_F_XU_V, VFWCVT_RTZ_X_F_V, VFWCVT_RTZ_XU_F_V, VFWCVT_X_F_V, VFWCVT_XU_F_V
93  ).map(_ === inst.ALL).reduce(_ || _)
94
  // "W-form" widening ops whose vs2 operand is already double-width.
95  private val vs2WideningInst = Seq(
96    //int
97    VWADD_WV, VWADD_WX, VWADDU_WV, VWADDU_WX,
98    VWSUB_WV, VWSUB_WX, VWSUBU_WV, VWSUBU_WX,
99    //fp
100    VFWADD_WF, VFWADD_WV, VFWSUB_WF, VFWSUB_WV
101  ).map(_ === inst.ALL).reduce(_ || _)
102
  // Narrowing ops: vs2 is double-width (2*SEW), vd is single-width.
103  private val narrowingInst = Seq(
104    //int
105    VNCLIP_WI, VNCLIP_WV, VNCLIP_WX, VNCLIPU_WI, VNCLIPU_WV, VNCLIPU_WX,
106    VNSRA_WI, VNSRA_WV, VNSRA_WX, VNSRL_WI, VNSRL_WV, VNSRL_WX,
107    //fp
108    VFNCVT_F_F_W, VFNCVT_F_X_W, VFNCVT_F_XU_W, VFNCVT_ROD_F_F_W, VFNCVT_RTZ_X_F_W, VFNCVT_RTZ_XU_F_W, VFNCVT_X_F_W, VFNCVT_XU_F_W
109  ).map(_ === inst.ALL).reduce(_ || _)
110
  // Integer extension: source EEW is SEW/2, SEW/4 or SEW/8.
111  private val intExtInst = Seq(
112    VSEXT_VF2, VSEXT_VF4, VSEXT_VF8, VZEXT_VF2, VZEXT_VF4, VZEXT_VF8
113  ).map(_ === inst.ALL).reduce(_ || _)
114
  // Add/sub with carry/borrow family — vd is a mask register.
115  private val acsbInst = Seq(
116    VMADC_VI, VMADC_VIM, VMADC_VV, VMADC_VVM, VMADC_VX, VMADC_VXM,
117    VMSBC_VV, VMSBC_VVM, VMSBC_VX, VMSBC_VXM
118  ).map(_ === inst.ALL).reduce(_ || _)
119
  // Integer/FP compares — vd is a mask register.
120  private val cmpInst = Seq(
121    //int
122    VMSEQ_VI, VMSEQ_VV, VMSEQ_VX,
123    VMSGT_VI, VMSGT_VX, VMSGTU_VI, VMSGTU_VX,
124    VMSLE_VI, VMSLE_VV, VMSLE_VX, VMSLEU_VI, VMSLEU_VV, VMSLEU_VX,
125    VMSLT_VV, VMSLT_VX, VMSLTU_VV, VMSLTU_VX,
126    VMSNE_VI, VMSNE_VV, VMSNE_VX,
127    //fp
128    VMFEQ_VF, VMFEQ_VV, VMFNE_VF, VMFNE_VV,
129    VMFGE_VF, VMFGT_VF, VMFLE_VF, VMFLE_VV, VMFLT_VF, VMFLT_VV
130  ).map(_ === inst.ALL).reduce(_ || _)
131
  // Single-width reductions — vs1 and vd hold a single (element-0) value.
132  private val redInst = Seq(
133    VREDAND_VS, VREDMAX_VS, VREDMAXU_VS, VREDMIN_VS, VREDMINU_VS, VREDOR_VS, VREDSUM_VS, VREDXOR_VS,
134    VFREDMAX_VS, VFREDMIN_VS, VFREDOSUM_VS, VFREDUSUM_VS
135  ).map(_ === inst.ALL).reduce(_ || _)
136
  // Widening reductions — result element is 2*SEW wide.
137  private val redWideningInst = Seq(
138    VWREDSUM_VS, VWREDSUMU_VS,
139    VFWREDOSUM_VS, VFWREDUSUM_VS
140  ).map(_ === inst.ALL).reduce(_ || _)
141
  // Mask-mask logical ops (vmand.mm etc.).
142  private val maskLogicalInst = Seq(
143    VMAND_MM, VMNAND_MM, VMANDN_MM, VMXOR_MM, VMOR_MM, VMNOR_MM, VMORN_MM, VMXNOR_MM
144  ).map(_ === inst.ALL).reduce(_ || _)
145
  // All ops whose vector operands are mask registers (incl. the logicals above).
146  private val maskArithmeticInst = Seq(
147    VCPOP_M, VFIRST_M, VMSBF_M, VMSIF_M, VMSOF_M
148  ).map(_ === inst.ALL).reduce(_ || _) || maskLogicalInst
149
  // viota.m / vid.v — mask (or no) source, full vector destination.
150  private val maskIndexInst = Seq(
151    VIOTA_M, VID_V
152  ).map(_ === inst.ALL).reduce(_ || _)
153
  // Scalar <-> element-0 moves.
154  private val vmvSingleInst = Seq(
155    VMV_X_S, VMV_S_X, VFMV_F_S, VFMV_S_F
156  ).map(_ === inst.ALL).reduce(_ || _)
157
  // Whole-register moves; register count encoded in the immediate.
158  private val vmvWholeInst = Seq(
159    VMV1R_V, VMV2R_V, VMV4R_V, VMV8R_V
160  ).map(_ === inst.ALL).reduce(_ || _)
161
162  private val vrgather16 = VRGATHEREI16_VV === inst.ALL
163  private val vcompress = VCOMPRESS_VM === inst.ALL
164  private val intExt2 = Seq(VSEXT_VF2, VZEXT_VF2).map(_ === inst.ALL).reduce(_ || _)
165  private val intExt4 = Seq(VSEXT_VF4, VZEXT_VF4).map(_ === inst.ALL).reduce(_ || _)
166  private val intExt8 = Seq(VSEXT_VF8, VZEXT_VF8).map(_ === inst.ALL).reduce(_ || _)
167
  // vset{i}vl{i} and whole-register ld/st do not depend on the current
  // vtype, so they are exempt from the vill check below.
168  private val notDependVtypeInst = Seq(VSETVLI, VSETIVLI, VSETVL).map(_ === inst.ALL).reduce(_ || _) || lsWholeInst
169
170
171  // 1. inst Illegal
  // Mask-logical ops are only defined unmasked; vm=0 is treated as illegal.
172  private val instIllegal = maskLogicalInst && inst.VM === 0.U
173
174  // 2. vill Illegal
  // Any vtype-dependent vector instruction is illegal while vtype.illegal
  // (vill) is set.
175  private val villIllegal = io.vtype.illegal && isVArithMem && !notDependVtypeInst
176
177  // 3. EEW Illegal
  // Int<->FP widening/narrowing converts whose FP side is 2*SEW wide;
  // these are exempt from the FP minimum-SEW rule just below.
178  private val doubleFpInst = Seq(
179    VFWCVT_F_X_V, VFWCVT_F_XU_V, VFNCVT_RTZ_X_F_W, VFNCVT_RTZ_XU_F_W, VFNCVT_X_F_W, VFNCVT_XU_F_W
180  ).map(_ === inst.ALL).reduce(_ || _)
  // FP ops reject 8/16-bit SEW (no fp16 support at this revision —
  // NOTE(review): revisit if Zvfh is added).
181  private val fpEewIllegal = FuType.isVecOPF(io.decodedInst.fuType) && !doubleFpInst && (SEW <= 1.U)
182
  // vzext/vsext source EEW = SEW / 2^k must be at least 8 bits.
183  private val intExtEewIllegal = intExt2 && SEW === 0.U ||
184                                 intExt4 && SEW <= 1.U ||
185                                 intExt8 && SEW <= 2.U
186
  // Widening/narrowing from SEW=64 would require a 128-bit EEW.
187  private val wnEewIllegal = (vdWideningInst || narrowingInst || redWideningInst) && SEW === 3.U
188
189  private val eewIllegal = fpEewIllegal || intExtEewIllegal || wnEewIllegal
190
191  // 4. EMUL Illegal
  // For strided/indexed accesses, EMUL = (EEW/SEW)*LMUL. In biased codes
  // that is LMUL + WIDTH - SEW, which must stay within [1,7] (1/8..8);
  // +& is the width-widening add, so the bounds compare cannot overflow.
192  private val lsEmulIllegal = (lsStrideInst || lsIndexInst) && (LMUL +& inst.WIDTH(1, 0) < SEW +& 1.U || LMUL +& inst.WIDTH(1, 0) > SEW +& 7.U)
193
  // Extension source EMUL = LMUL / 2^k must be at least 1/8 (biased code >= 1).
194  private val intExtEmulIllegal = intExt2 && LMUL === 1.U ||
195                                  intExt4 && LMUL <= 2.U ||
196                                  intExt8 && LMUL <= 3.U
197
  // Widening/narrowing with LMUL=8 would need a double-width group of 16.
198  private val wnEmulIllegal = (vdWideningInst || narrowingInst || redWideningInst) && LMUL === 7.U
199
  // vrgatherei16 index EMUL = LMUL*16/SEW = LMUL + 1 - SEW (biased),
  // which must stay within [1,7].
200  private val gather16EmulIllegal = vrgather16 && (LMUL < SEW || LMUL > SEW +& 6.U)
201
  // Segment ld/st: NFIELDS = NF+1 fields, each occupying max(EMUL, 1)
  // whole registers. Indexed segments use EMUL = LMUL for the data
  // (data EEW = SEW); others use EMUL = LMUL * WIDTH / SEW.
202  private val NFIELDS = inst.NF +& 1.U
203  private val segEmul = Mux(lsIndexInst, LMUL, LMUL +& inst.WIDTH(1, 0) - SEW)
204  private val emulNumPow = Mux(segEmul(2), segEmul(1, 0), 0.U(2.W))
205  private val segRegNum = NFIELDS << emulNumPow
206  private val segRegMax = inst.VD +& segRegNum
207
  // A segment access may use at most 8 registers and must not run past v31.
208  private val lsSegIllegal = (lsStrideInst || lsIndexInst) && inst.NF =/= 0.U && (segRegNum > 8.U || segRegMax > 32.U)
209
210  private val emulIllegal = lsEmulIllegal || intExtEmulIllegal || wnEmulIllegal || gather16EmulIllegal || lsSegIllegal
211
212  // 5. Reg Number Align
  // Each operand's register index must be aligned to its own EMUL group.
  // vs1 is a single mask register for mask ops / vcompress, a single
  // element for reductions; vrgatherei16's index EEW is fixed at 16.
213  private val vs1IsMask = maskArithmeticInst || vcompress
214  private val vs1IsSingleElem = redInst || redWideningInst
215  private val vs1Eew = Mux(vrgather16, "b01".U, SEW)
216  private val vs1Emul = Mux(vs1IsMask || vs1IsSingleElem, "b100".U, Mux(vrgather16, LMUL +& 1.U - SEW, LMUL))
217  private val vs1NotAlign = SrcType.isVp(io.decodedInst.srcType(0)) && RegNumNotAlign(inst.VS1, vs1Emul)
218
219  private val vs2IsMask = maskArithmeticInst || maskIndexInst
220  private val vs2IsSingleElem = vmvSingleInst
  // One-hot select for vs2's EEW: indexed ld/st take it from WIDTH;
  // vs2-widening/narrowing use 2*SEW; vzext/vsext vf2/4/8 use SEW/2/4/8.
221  private val vs2EewSel = Cat(lsIndexInst, (vs2WideningInst || narrowingInst || redWideningInst), intExt2, intExt4, intExt8)
222  private val vs2Eew = LookupTreeDefault(vs2EewSel, SEW, List(
223    "b10000".U  -> inst.WIDTH(1, 0),
224    "b01000".U  -> (SEW + 1.U),
225    "b00100".U  -> (SEW - 1.U),
226    "b00010".U  -> (SEW - 2.U),
227    "b00001".U  -> (SEW - 3.U)
228  ))
  // One-hot select for vs2's EMUL: single register for mask/scalar
  // operands; 2*LMUL for w-form/narrowing; vmv<nr>r count from imm[2:0];
  // EEW-scaled LMUL for extensions and indexed accesses.
229  private val vs2EmulSel = Cat((vs2IsMask || vs2IsSingleElem), (vs2WideningInst || narrowingInst), vmvWholeInst, (intExtInst || lsIndexInst))
230  private val vs2Emul = LookupTreeDefault(vs2EmulSel, LMUL, List(
231    "b1000".U  -> "b100".U,
232    "b0100".U  -> (LMUL + 1.U),
233    "b0010".U  -> NFtoLmul(inst.IMM5_OPIVI(2, 0)),
234    "b0001".U  -> (LMUL +& vs2Eew - SEW)
235  ))
236  private val vs2NotAlign = SrcType.isVp(io.decodedInst.srcType(1)) && RegNumNotAlign(inst.VS2, vs2Emul)
237
238  private val vdIsMask = lsMaskInst || acsbInst || cmpInst || maskArithmeticInst
239  private val vdIsSingleElem = redInst || redWideningInst || vmvSingleInst
240  private val vdEew = Mux(lsStrideInst, inst.WIDTH(1, 0), Mux(vdWideningInst || redWideningInst, SEW + 1.U, SEW))
  // vd EMUL: single register for mask/scalar results; 2*LMUL for
  // widening; whole-register count from imm[2:0] (vmv<nr>r) or NF
  // (whole-register ld/st); EEW-scaled LMUL for strided accesses.
241  private val vdEmulSel = Cat((vdIsMask || vdIsSingleElem), vdWideningInst, vmvWholeInst, lsWholeInst, lsStrideInst)
242  private val vdEmul = LookupTreeDefault(vdEmulSel, LMUL, List(
243    "b10000".U  -> "b100".U,
244    "b01000".U  -> (LMUL + 1.U),
245    "b00100".U  -> NFtoLmul(inst.IMM5_OPIVI(2, 0)),
246    "b00010".U  -> NFtoLmul(inst.NF),
247    "b00001".U  -> (LMUL +& vdEew - SEW)
248  ))
  // vd is checked both when read as an old-destination source and when written.
249  private val vdNotAlign = (SrcType.isVp(io.decodedInst.srcType(2)) || io.decodedInst.vecWen) && RegNumNotAlign(inst.VD, vdEmul)
250
251  private val regNumIllegal = isVArithMem && (vs1NotAlign || vs2NotAlign || vdNotAlign)
252
253  // 6. v0 Overlap
  // A masked (vm=0) instruction must not write v0 unless vd is a mask or
  // single-element result; vmsbf/vmsif/vmsof are explicitly excluded
  // from that allowance.
254  private val v0AllowOverlap = (vdIsMask || vdIsSingleElem) && !Seq(VMSBF_M, VMSIF_M, VMSOF_M).map(_ === inst.ALL).reduce(_ || _)
255  private val v0Overlap = isVArithMem && io.decodedInst.vecWen && inst.VM === 0.U && inst.VD === 0.U && !v0AllowOverlap
256
257  // 7. Src Reg Overlap
  // Inclusive register-index ranges occupied by each operand group.
258  private val vs1RegLo = inst.VS1
259  private val vs1RegHi = inst.VS1 +& LmultoRegNum(vs1Emul) - 1.U
260  private val vs2RegLo = inst.VS2
261  private val vs2RegHi = inst.VS2 +& LmultoRegNum(vs2Emul) - 1.U
262  private val vdRegLo = inst.VD
  // NOTE(review): vdRegHi uses the non-widening `+`, unlike the `+&`
  // used for vs1/vs2 — could wrap for vd groups near v31; such cases are
  // also caught by the alignment check, but confirm this is intended.
263  private val vdRegHi = Mux(lsStrideInst || lsIndexInst, segRegMax - 1.U, inst.VD + LmultoRegNum(vdEmul) - 1.U)
264
  // Instructions whose vd may never overlap a source group: indexed
  // segment ld/st, mask-set/iota, up-slides, gathers and compress.
265  private val notAllowOverlapInst = lsIndexInst && inst.NF =/= 0.U || Seq(VMSBF_M, VMSIF_M, VMSOF_M, VIOTA_M,
266                                    VSLIDEUP_VX, VSLIDEUP_VI, VSLIDE1UP_VX, VFSLIDE1UP_VF, VRGATHER_VV, VRGATHEREI16_VV, VRGATHER_VX, VRGATHER_VI, VCOMPRESS_VM).map(_ === inst.ALL).reduce(_ || _)
267
268  // 8. vstart Illegal
  // Vector arithmetic instructions require vstart == 0.
269  private val vstartIllegal = isVArith && (io.vstart =/= 0.U)
270
271  //vs1
  // Overlap of vd with vs1 is legal when (RVV-style constraints):
  //  1) both are masks, or both are vectors of equal EEW;
  //  2) vd is narrower and overlaps the lowest-numbered part of vs1;
  //  3) vd is wider, vs1's EMUL >= 1, and the overlap is the
  //     highest-numbered part of vs1;
  // or when vd is a single-element result — unless the instruction
  // forbids any overlap (notAllowOverlapInst).
272  private val vs1vdRegNotOverlap = vs1RegHi < vdRegLo || vdRegHi < vs1RegLo
273  private val vs1Constraint1 = vs1IsMask && vdIsMask || !vs1IsMask && !vdIsMask && vs1Eew === vdEew
274  private val vs1Constraint2 = (vdIsMask && !vs1IsMask || !vs1IsMask && !vdIsMask && vs1Eew > vdEew) && vdRegLo === vs1RegLo && vdRegHi <= vs1RegHi
275  private val vs1Constraint3 = (!vdIsMask && vs1IsMask || !vs1IsMask && !vdIsMask && vs1Eew < vdEew) && vs1Emul >= "b100".U && vdRegHi === vs1RegHi && vdRegLo <= vs1RegLo
276  private val vs1AllowOverlap = (vs1Constraint1 || vs1Constraint2 || vs1Constraint3 || vdIsSingleElem) && !notAllowOverlapInst
277  private val vs1vdOverlap = (SrcType.isVp(io.decodedInst.srcType(0)) && io.decodedInst.vecWen) && !vs1vdRegNotOverlap && !vs1AllowOverlap
278  //vs2
  // Same three constraints applied to vd-vs2 overlap.
279  private val vs2vdRegNotOverlap = vs2RegHi < vdRegLo || vdRegHi < vs2RegLo
280  private val vs2Constraint1 = vs2IsMask && vdIsMask || !vs2IsMask && !vdIsMask && vs2Eew === vdEew
281  private val vs2Constraint2 = (vdIsMask && !vs2IsMask || !vs2IsMask && !vdIsMask && vs2Eew > vdEew) && vdRegLo === vs2RegLo && vdRegHi <= vs2RegHi
282  private val vs2Constraint3 = (!vdIsMask && vs2IsMask || !vs2IsMask && !vdIsMask && vs2Eew < vdEew) && vs2Emul >= "b100".U && vdRegHi === vs2RegHi && vdRegLo <= vs2RegLo
283  private val vs2AllowOverlap = (vs2Constraint1 || vs2Constraint2 || vs2Constraint3 || vdIsSingleElem) && !notAllowOverlapInst
284  private val vs2vdOverlap = (SrcType.isVp(io.decodedInst.srcType(1)) && io.decodedInst.vecWen) && !vs2vdRegNotOverlap && !vs2AllowOverlap
285
286  private val regOverlapIllegal = v0Overlap || vs1vdOverlap || vs2vdOverlap
287
  // Final verdict: OR of all eight checks.
288  io.illegalInst := instIllegal || villIllegal || eewIllegal || emulIllegal || regNumIllegal || regOverlapIllegal || vstartIllegal
  // Keep the per-cause signals in the emitted design for debug/waveforms.
289  dontTouch(instIllegal)
290  dontTouch(villIllegal)
291  dontTouch(eewIllegal)
292  dontTouch(emulIllegal)
293  dontTouch(regNumIllegal)
294  dontTouch(regOverlapIllegal)
295  dontTouch(notDependVtypeInst)
296  dontTouch(vstartIllegal)
297}