xref: /XiangShan/src/main/scala/xiangshan/backend/fu/wrapper/VFMA.scala (revision b1e920234888fd3e5463ceb2a99c9bdca087f585)
1package xiangshan.backend.fu.wrapper
2
3import org.chipsalliance.cde.config.Parameters
4import chisel3._
5import chisel3.util._
6import utils.XSError
7import xiangshan.backend.fu.FuConfig
8import xiangshan.backend.fu.vector.Bundles.VSew
9import xiangshan.backend.fu.vector.utils.VecDataSplitModule
10import xiangshan.backend.fu.vector.{Mgu, VecPipedFuncUnit}
11import xiangshan.ExceptionNO
12import yunsuan.VfpuType
13import yunsuan.VfmaType
14import yunsuan.vector.VectorFloatFMA
15
class VFMA(cfg: FuConfig)(implicit p: Parameters) extends VecPipedFuncUnit(cfg) {
  // Wrapper for the vector floating-point fused multiply-add datapath.
  // It splits the VLEN-wide operands into 64-bit lanes, instantiates one
  // VectorFloatFMA per lane, then merges the lane results through the Mgu
  // (mask/tail guard unit) and accumulates per-element fflags.
  //
  // Fix: the assertion message previously said "Vfalu" — a copy-paste remnant
  // from the VFALU wrapper — which mis-identified the failing unit in logs.
  XSError(io.in.valid && io.in.bits.ctrl.fuOpType === VfpuType.dummy, "Vfma OpType not supported")

  // params alias
  private val dataWidth = cfg.dataBits
  private val dataWidthOfDataModule = 64            // each FMA lane is 64 bits wide
  private val numVecModule = dataWidth / dataWidthOfDataModule

  // io alias: low 4 bits encode the FMA op, bit 4 flags a widening op (e.g. vfwmacc)
  private val opcode  = fuOpType(3,0)
  private val resWiden  = fuOpType(4)

  // modules: one FMA lane per 64 bits, operand splitters, and the mask/tail guard
  private val vfmas = Seq.fill(numVecModule)(Module(new VectorFloatFMA))
  private val vs2Split = Module(new VecDataSplitModule(dataWidth, dataWidthOfDataModule))
  private val vs1Split = Module(new VecDataSplitModule(dataWidth, dataWidthOfDataModule))
  private val oldVdSplit  = Module(new VecDataSplitModule(dataWidth, dataWidthOfDataModule))
  private val mgu = Module(new Mgu(dataWidth))

  /**
    * In connection of [[vs2Split]], [[vs1Split]] and [[oldVdSplit]]
    */
  vs2Split.io.inVecData := vs2
  vs1Split.io.inVecData := vs1
  oldVdSplit.io.inVecData := oldVd

  /**
    * [[vfmas]]'s in connection
    */
  // Interleave the 32-bit slices so each lane gets the element pair it needs
  // for widening ops:
  // Vec(vs2(31,0), vs2(63,32), vs2(95,64), vs2(127,96)) ==>
  // Vec(
  //   Cat(vs2(95,64),  vs2(31,0)),
  //   Cat(vs2(127,96), vs2(63,32)),
  // )
  private val vs2GroupedVec: Vec[UInt] = VecInit(vs2Split.io.outVec32b.zipWithIndex.groupBy(_._2 % 2).map(x => x._1 -> x._2.map(_._1)).values.map(x => Cat(x.reverse)).toSeq)
  private val vs1GroupedVec: Vec[UInt] = VecInit(vs1Split.io.outVec32b.zipWithIndex.groupBy(_._2 % 2).map(x => x._1 -> x._2.map(_._1)).values.map(x => Cat(x.reverse)).toSeq)
  private val resultData = Wire(Vec(numVecModule, UInt(dataWidthOfDataModule.W)))
  // 20 bits per lane = 4 elements x 5 fflag bits (NV/DZ/OF/UF/NX)
  private val fflagsData = Wire(Vec(numVecModule, UInt(20.W)))
  // A scalar FP value narrower than 64 bits must be NaN-boxed (upper bits all
  // ones); these flags mark operands that are NOT properly boxed so the lane
  // can substitute the canonical NaN, per the RISC-V FP register convention.
  val fp_aIsFpCanonicalNAN = Wire(Vec(numVecModule, Bool()))
  val fp_bIsFpCanonicalNAN = Wire(Vec(numVecModule, Bool()))
  val fp_cIsFpCanonicalNAN = Wire(Vec(numVecModule, Bool()))
  vfmas.zipWithIndex.foreach {
    case (mod, i) =>
      // vd = +-(vs2 * vs1) +- vd : a/b are multiplicands, c is the addend (old vd)
      mod.io.fp_a         := vs2Split.io.outVec64b(i)
      mod.io.fp_b         := vs1Split.io.outVec64b(i)
      mod.io.fp_c         := oldVdSplit.io.outVec64b(i)
      // widening sources: the interleaved 32-bit slices computed above
      mod.io.widen_a      := Cat(vs2Split.io.outVec32b(i+numVecModule), vs2Split.io.outVec32b(i))
      mod.io.widen_b      := Cat(vs1Split.io.outVec32b(i+numVecModule), vs1Split.io.outVec32b(i))
      mod.io.frs1         := 0.U     // already vf -> vv
      mod.io.is_frs1      := false.B // already vf -> vv
      mod.io.uop_idx      := vuopIdx(0)
      mod.io.is_vec       := true.B // Todo
      mod.io.round_mode   := frm
      // widening ops produce results one sew step wider than the source
      mod.io.fp_format    := Mux(resWiden, vsew + 1.U, vsew)
      mod.io.res_widening := resWiden
      mod.io.op_code      := opcode
      resultData(i) := mod.io.fp_result
      fflagsData(i) := mod.io.fflags
      // Only scalar-moved-to-vector (FpToVec) values can be NaN-boxed; check
      // that the upper 32/48 bits are all ones for e32/e16 respectively.
      fp_aIsFpCanonicalNAN(i) := vecCtrl.fpu.isFpToVecInst & (
        ((vsew === VSew.e32) & (!vs2Split.io.outVec64b(i).head(32).andR)) |
          ((vsew === VSew.e16) & (!vs2Split.io.outVec64b(i).head(48).andR))
        )
      fp_bIsFpCanonicalNAN(i) := vecCtrl.fpu.isFpToVecInst & (
        ((vsew === VSew.e32) & (!vs1Split.io.outVec64b(i).head(32).andR)) |
          ((vsew === VSew.e16) & (!vs1Split.io.outVec64b(i).head(48).andR))
        )
      // vfmul has no addend, so the c operand's boxing is irrelevant there
      fp_cIsFpCanonicalNAN(i) := !(opcode === VfmaType.vfmul) & vecCtrl.fpu.isFpToVecInst & (
        ((vsew === VSew.e32) & (!oldVdSplit.io.outVec64b(i).head(32).andR)) |
          ((vsew === VSew.e16) & (!oldVdSplit.io.outVec64b(i).head(48).andR))
        )
      mod.io.fp_aIsFpCanonicalNAN := fp_aIsFpCanonicalNAN(i)
      mod.io.fp_bIsFpCanonicalNAN := fp_bIsFpCanonicalNAN(i)
      mod.io.fp_cIsFpCanonicalNAN := fp_cIsFpCanonicalNAN(i)
  }

  // Output-stage view of the element width; widening results are one step wider.
  // NOTE(review): RegNext assumes the widen flag reaches the output stage after
  // exactly one cycle — confirm this matches cfg's pipeline latency.
  val outEew = Mux(RegNext(resWiden), outVecCtrl.vsew + 1.U, outVecCtrl.vsew)
  val outVuopidx = outVecCtrl.vuopIdx(2, 0)
  // elements per register for this eew: VLEN/8 bytes >> eew
  val vlMax = ((VLEN / 8).U >> outEew).asUInt
  // |vlmul| as a positive shift amount (vlmul(2) set means fractional LMUL)
  val lmulAbs = Mux(outVecCtrl.vlmul(2), (~outVecCtrl.vlmul(1, 0)).asUInt + 1.U, outVecCtrl.vlmul(1, 0))
  // scalar-to-vector moves always operate on exactly one element
  val outVlFix = Mux(outVecCtrl.fpu.isFpToVecInst, 1.U, outVl)
  val vlMaxAllUop = Wire(outVl.cloneType) // NOTE(review): computed but unread below
  vlMaxAllUop := Mux(outVecCtrl.vlmul(2), vlMax >> lmulAbs, vlMax << lmulAbs).asUInt
  val vlMaxThisUop = Mux(outVecCtrl.vlmul(2), vlMax >> lmulAbs, vlMax).asUInt
  // elements of vl that fall into this uop's register slice, clamped to >= 0
  val vlSetThisUop = Mux(outVlFix > outVuopidx * vlMaxThisUop, outVlFix - outVuopidx * vlMaxThisUop, 0.U)
  val vlThisUop = Wire(UInt(3.W))
  vlThisUop := Mux(vlSetThisUop < vlMaxThisUop, vlSetThisUop, vlMaxThisUop)
  // one bit per potential element (4 per 64-bit lane): set for elements below vl
  val vlMaskRShift = Wire(UInt((4 * numVecModule).W))
  vlMaskRShift := Fill(4 * numVecModule, 1.U(1.W)) >> ((4 * numVecModule).U - vlThisUop)

  // FpToVec (scalar) ops ignore the vector mask entirely
  private val needNoMask = outVecCtrl.fpu.isFpToVecInst
  val maskToMgu = Mux(needNoMask, allMaskTrue, outSrcMask)
  // Per-element enables for fflags accumulation: masked-off or tail elements
  // must not contribute exception flags.
  val allFFlagsEn = Wire(Vec(4 * numVecModule, Bool()))
  val outSrcMaskRShift = Wire(UInt((4 * numVecModule).W))
  outSrcMaskRShift := (maskToMgu >> (outVecCtrl.vuopIdx(2, 0) * vlMax))(4 * numVecModule - 1, 0)
  // e16: every 16-bit slot holds a real element, mask maps 1:1
  val f16FFlagsEn = outSrcMaskRShift
  // e32: 2 elements per lane occupy the low 2 slots; upper 2 are don't-care (forced on,
  // later pruned by vlMaskRShift)
  val f32FFlagsEn = Wire(Vec(numVecModule, UInt(4.W)))
  for (i <- 0 until numVecModule) {
    f32FFlagsEn(i) := Cat(Fill(2, 1.U), outSrcMaskRShift(2 * i + 1, 2 * i))
  }
  // e64: 1 element per lane in the lowest slot; upper 3 forced on, pruned by vlMaskRShift
  val f64FFlagsEn = Wire(Vec(numVecModule, UInt(4.W)))
  for (i <- 0 until numVecModule) {
    f64FFlagsEn(i) := Cat(Fill(3, 1.U), outSrcMaskRShift(i))
  }
  val fflagsEn = Mux1H(
    Seq(
      (outEew === 1.U) -> f16FFlagsEn.asUInt,
      (outEew === 2.U) -> f32FFlagsEn.asUInt,
      (outEew === 3.U) -> f64FFlagsEn.asUInt
    )
  )
  allFFlagsEn := (fflagsEn & vlMaskRShift).asTypeOf(allFFlagsEn)

  // OR together the 5-bit fflags of every enabled element
  val allFFlags = fflagsData.asTypeOf(Vec(4 * numVecModule, UInt(5.W)))
  val outFFlags = allFFlagsEn.zip(allFFlags).map {
    case (en, fflags) => Mux(en, fflags, 0.U(5.W))
  }.reduce(_ | _)
  io.out.bits.res.fflags.get := outFFlags

  // Merge lane results through the Mgu, which applies mask/tail policy
  // (vta/vma), vstart, and narrowing, and flags illegal configurations.
  val resultDataUInt = resultData.asUInt
  mgu.io.in.vd := resultDataUInt
  mgu.io.in.oldVd := outOldVd
  mgu.io.in.mask := maskToMgu
  mgu.io.in.info.ta := outVecCtrl.vta
  mgu.io.in.info.ma := outVecCtrl.vma
  mgu.io.in.info.vl := outVlFix
  mgu.io.in.info.vlmul := outVecCtrl.vlmul
  mgu.io.in.info.valid := io.out.valid
  mgu.io.in.info.vstart := Mux(outVecCtrl.fpu.isFpToVecInst, 0.U, outVecCtrl.vstart)
  mgu.io.in.info.eew := outEew
  mgu.io.in.info.vsew := outVecCtrl.vsew
  mgu.io.in.info.vdIdx := outVecCtrl.vuopIdx
  mgu.io.in.info.narrow := outVecCtrl.isNarrow
  mgu.io.in.info.dstMask := outVecCtrl.isDstMask
  mgu.io.in.isIndexedVls := false.B
  io.out.bits.res.data := mgu.io.out.vd
  io.out.bits.ctrl.exceptionVec.get(ExceptionNO.illegalInstr) := mgu.io.out.illegal
}
153