// xref: /XiangShan/src/main/scala/xiangshan/mem/vector/VecCommon.scala (revision d0d2c22d6f8c96ee7c45325f375b17a0243a8259)
/***************************************************************************************
  * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
  * Copyright (c) 2020-2021 Peng Cheng Laboratory
  *
  * XiangShan is licensed under Mulan PSL v2.
  * You can use this software according to the terms and conditions of the Mulan PSL v2.
  * You may obtain a copy of Mulan PSL v2 at:
  *          http://license.coscl.org.cn/MulanPSL2
  *
  * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
  * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
  * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
  *
  * See the Mulan PSL v2 for more details.
  ***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan._
import xiangshan.backend.rob.RobPtr
import xiangshan.backend.Bundles._
import xiangshan.backend.fu.FuType
import xiangshan.backend.fu.vector.Bundles.VEew

/**
  * Commonly used parameters and functions in the VLSU (vector load/store unit)
  */
trait VLSUConstants {
  val VLEN = 128
  // for packing unit-stride flows
  val AlignedNum = 4 // 1/2/4/8
  def VLENB = VLEN/8
  def vOffsetBits = log2Up(VLENB) // bit width to index an offset inside a vector reg
  lazy val vlmBindexBits = 8 // will be overridden later
  lazy val vsmBindexBits = 8 // will be overridden later

  def alignTypes = 5 // eew/sew = 1/2/4/8 bytes; the last type indicates a 128-bit element
  def alignTypeBits = log2Up(alignTypes)
  def maxMUL = 8
  def maxFields = 8
  /**
    * In the most extreme cases, like a segment indexed instruction with eew=64, emul=8, sew=8, lmul=1,
    * and nf=8, each data reg is mapped to 8 index regs and there are 8 data regs in total,
    * one for each field. Therefore, an instruction can be divided into 64 uops at most.
    */
  def maxUopNum = maxMUL * maxFields // 64
  def maxFlowNum = 16
  def maxElemNum = maxMUL * maxFlowNum // 128
  // def uopIdxBits = log2Up(maxUopNum) // to index a uop inside a robIdx
  def elemIdxBits = log2Up(maxElemNum) + 1 // to index an element within an instruction
  def flowIdxBits = log2Up(maxFlowNum) + 1 // to index a flow within a uop
  def fieldBits = log2Up(maxFields) + 1 // 4 bits to encode 1~8

  def ewBits = 3 // bit width of EEW/SEW
  def mulBits = 3 // bit width of emul/lmul

  def getSlice(data: UInt, i: Int, alignBits: Int): UInt = {
    require(data.getWidth >= (i+1) * alignBits)
    data((i+1) * alignBits - 1, i * alignBits)
  }
  // take alignBits bits starting at byte offset i (not element-aligned)
  def getNoAlignedSlice(data: UInt, i: Int, alignBits: Int): UInt = {
    data(i * 8 + alignBits - 1, i * 8)
  }

  def getByte(data: UInt, i: Int = 0) = getSlice(data, i, 8)
  def getHalfWord(data: UInt, i: Int = 0) = getSlice(data, i, 16)
  def getWord(data: UInt, i: Int = 0) = getSlice(data, i, 32)
  def getDoubleWord(data: UInt, i: Int = 0) = getSlice(data, i, 64)
  def getDoubleDoubleWord(data: UInt, i: Int = 0) = getSlice(data, i, 128)
}
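
// For reference, the slice helpers above select element i at the given width, e.g.:
//   getByte(data, 3)       == data(31, 24)
//   getWord(data, 1)       == data(63, 32)
//   getDoubleWord(data, 1) == data(127, 64)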

trait HasVLSUParameters extends HasXSParameter with VLSUConstants {
  override val VLEN = coreParams.VLEN
  override lazy val vlmBindexBits = log2Up(coreParams.VlMergeBufferSize)
  override lazy val vsmBindexBits = log2Up(coreParams.VsMergeBufferSize)
  lazy val maxMemByteNum = 16 // maximum bytes for a single memory access
  /**
   * Get the low (alignment) bits of an address.
   * @param addr Address to be checked
   * @param width Access width, in bytes, against which alignment is checked
   */
  def getCheckAddrLowBits(addr: UInt, width: Int): UInt = addr(log2Up(width) - 1, 0)
  def getOverflowBit(in: UInt, width: Int): UInt = in(log2Up(width))
  def isUnitStride(instType: UInt) = instType(1, 0) === "b00".U
  def isStrided(instType: UInt) = instType(1, 0) === "b10".U
  def isIndexed(instType: UInt) = instType(0) === "b1".U
  def isNotIndexed(instType: UInt) = instType(0) === "b0".U
  def isSegment(instType: UInt) = instType(2) === "b1".U
  def is128Bit(alignedType: UInt) = alignedType(2) === "b1".U

  def mergeDataWithMask(oldData: UInt, newData: UInt, mask: UInt): Vec[UInt] = {
    require(oldData.getWidth == newData.getWidth)
    require(oldData.getWidth == mask.getWidth * 8)
    VecInit(mask.asBools.zipWithIndex.map { case (en, i) =>
      Mux(en, getByte(newData, i), getByte(oldData, i))
    })
  }
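
  // Worked example of the byte-granularity merge above:
  //   oldData = 0xAABBCCDD, newData = 0x11223344, mask = b0101
  //   => bytes (3..0) = (0xAA, 0x22, 0xCC, 0x44), i.e. 0xAA22CC44 after .asUInt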

  // def asBytes(data: UInt) = {
  //   require(data.getWidth % 8 == 0)
  //   (0 until data.getWidth/8).map(i => getByte(data, i))
  // }

  def mergeDataWithElemIdx(
    oldData: UInt,
    newData: Seq[UInt],
    alignedType: UInt,
    elemIdx: Seq[UInt],
    valids: Seq[Bool]
  ): UInt = {
    require(newData.length == elemIdx.length)
    require(newData.length == valids.length)
    LookupTree(alignedType, List(
      "b00".U -> VecInit(elemIdx.map(e => UIntToOH(e(3, 0)).asBools).transpose.zipWithIndex.map { case (selVec, i) =>
        ParallelPosteriorityMux(
          true.B +: selVec.zip(valids).map(x => x._1 && x._2),
          getByte(oldData, i) +: newData.map(getByte(_))
        )}).asUInt,
      "b01".U -> VecInit(elemIdx.map(e => UIntToOH(e(2, 0)).asBools).transpose.zipWithIndex.map { case (selVec, i) =>
        ParallelPosteriorityMux(
          true.B +: selVec.zip(valids).map(x => x._1 && x._2),
          getHalfWord(oldData, i) +: newData.map(getHalfWord(_))
        )}).asUInt,
      "b10".U -> VecInit(elemIdx.map(e => UIntToOH(e(1, 0)).asBools).transpose.zipWithIndex.map { case (selVec, i) =>
        ParallelPosteriorityMux(
          true.B +: selVec.zip(valids).map(x => x._1 && x._2),
          getWord(oldData, i) +: newData.map(getWord(_))
        )}).asUInt,
      "b11".U -> VecInit(elemIdx.map(e => UIntToOH(e(0)).asBools).transpose.zipWithIndex.map { case (selVec, i) =>
        ParallelPosteriorityMux(
          true.B +: selVec.zip(valids).map(x => x._1 && x._2),
          getDoubleWord(oldData, i) +: newData.map(getDoubleWord(_))
        )}).asUInt
    ))
  }

  def mergeDataWithElemIdx(oldData: UInt, newData: UInt, alignedType: UInt, elemIdx: UInt): UInt = {
    mergeDataWithElemIdx(oldData, Seq(newData), alignedType, Seq(elemIdx), Seq(true.B))
  }
  /**
    * Merge 128-bit unit-stride data by byte.
    */
  object mergeDataByByte {
    def apply(oldData: UInt, newData: UInt, mask: UInt): UInt = {
      val selVec = Seq(mask).map(_.asBools).transpose
      VecInit(selVec.zipWithIndex.map{ case (selV, i) =>
        ParallelPosteriorityMux(
          true.B +: selV.map(x => x),
          getByte(oldData, i) +: Seq(getByte(newData, i))
        )}).asUInt
    }
  }

  /**
    * Merge unit-stride data into 256 bits: each 128-bit input is merged into a 256-bit result.
    * With 3 ports:
    *   port 0 uses a 6-to-1 multiplexer -> (128'b0, data), (data, 128'b0), (data, port2data), (port2data, data), (data, port3data) or (port3data, data)
    *   port 1 uses a 4-to-1 multiplexer -> (128'b0, data), (data, 128'b0), (data, port3data) or (port3data, data)
    *   port 3 uses a 2-to-1 multiplexer -> (128'b0, data) or (data, 128'b0)
    */
  object mergeDataByIndex {
    def apply(data: Seq[UInt], mask: Seq[UInt], index: UInt, valids: Seq[Bool]): (UInt, UInt) = {
      require(data.length == valids.length)
      require(data.length == mask.length)
      val muxLength = data.length
      val selDataMatrix = Wire(Vec(muxLength, Vec(2, UInt((VLEN * 2).W)))) // 3 * 2 * 256
      val selMaskMatrix = Wire(Vec(muxLength, Vec(2, UInt((VLENB * 2).W)))) // 3 * 2 * 16
      dontTouch(selDataMatrix)
      dontTouch(selMaskMatrix)
      for(i <- 0 until muxLength){
        if(i == 0){
          selDataMatrix(i)(0) := Cat(0.U(VLEN.W), data(i))
          selDataMatrix(i)(1) := Cat(data(i), 0.U(VLEN.W))
          selMaskMatrix(i)(0) := Cat(0.U(VLENB.W), mask(i))
          selMaskMatrix(i)(1) := Cat(mask(i), 0.U(VLENB.W))
        }
        else{
          selDataMatrix(i)(0) := Cat(data(i), data(0))
          selDataMatrix(i)(1) := Cat(data(0), data(i))
          selMaskMatrix(i)(0) := Cat(mask(i), mask(0))
          selMaskMatrix(i)(1) := Cat(mask(0), mask(i))
        }
      }
      val selIdxVec = (0 until muxLength).map(_.U)
      val selIdx    = PriorityMux(valids.reverse, selIdxVec.reverse)

      val selData = Mux(index === 0.U,
                        selDataMatrix(selIdx)(0),
                        selDataMatrix(selIdx)(1))
      val selMask = Mux(index === 0.U,
                        selMaskMatrix(selIdx)(0),
                        selMaskMatrix(selIdx)(1))
      (selData, selMask)
    }
  }
  def mergeDataByIndex(data: UInt, mask: UInt, index: UInt): (UInt, UInt) = {
    mergeDataByIndex(Seq(data), Seq(mask), index, Seq(true.B))
  }
}

abstract class VLSUModule(implicit p: Parameters) extends XSModule
  with HasVLSUParameters
  with HasCircularQueuePtrHelper
abstract class VLSUBundle(implicit p: Parameters) extends XSBundle
  with HasVLSUParameters

class VLSUBundleWithMicroOp(implicit p: Parameters) extends VLSUBundle {
  val uop = new DynInst
}

class OnlyVecExuOutput(implicit p: Parameters) extends VLSUBundle {
  val isvec = Bool()
  val vecdata = UInt(VLEN.W)
  val mask = UInt(VLENB.W)
  // val rob_idx_valid = Vec(2, Bool())
  // val inner_idx = Vec(2, UInt(3.W))
  // val rob_idx = Vec(2, new RobPtr)
  // val offset = Vec(2, UInt(4.W))
  val reg_offset = UInt(vOffsetBits.W)
  val vecActive = Bool() // 1: active element, 0: inactive element
  val is_first_ele = Bool()
  val elemIdx = UInt(elemIdxBits.W) // element index
  val elemIdxInsideVd = UInt(elemIdxBits.W) // element index within the scope of vd
  val trigger = TriggerAction()
  val vecVaddrOffset = UInt(VAddrBits.W)
  val vecTriggerMask = UInt((VLEN/8).W)
  // val uopQueuePtr = new VluopPtr
  // val flowPtr = new VlflowPtr
}

class VecExuOutput(implicit p: Parameters) extends MemExuOutput with HasVLSUParameters {
  val vec = new OnlyVecExuOutput
  val alignedType       = UInt(alignTypeBits.W)
  // feedback
  val vecFeedback       = Bool()
}

class VecUopBundle(implicit p: Parameters) extends VLSUBundleWithMicroOp {
  val flowMask       = UInt(VLENB.W) // each bit for a flow
  val byteMask       = UInt(VLENB.W) // each bit for a byte
  val data           = UInt(VLEN.W)
  // val fof            = Bool() // fof is only used for vector loads
  val excp_eew_index = UInt(elemIdxBits.W)
  // val exceptionVec   = ExceptionVec() // uop has exceptionVec
  val baseAddr = UInt(VAddrBits.W)
  val stride = UInt(VLEN.W)
  val flow_counter = UInt(flowIdxBits.W)

  // instruction decode result
  val flowNum = UInt(flowIdxBits.W) // # of flows in a uop
  // val flowNumLog2 = UInt(log2Up(flowIdxBits).W) // log2(flowNum), for better timing of multiplication
  val nfields = UInt(fieldBits.W) // NFIELDS
  val vm = Bool() // whether vector masking is enabled
  val usWholeReg = Bool() // unit-stride, whole register load
  val usMaskReg = Bool() // unit-stride, masked store/load
  val eew = VEew() // size of memory elements
  val sew = UInt(ewBits.W)
  val emul = UInt(mulBits.W)
  val lmul = UInt(mulBits.W)
  val vlmax = UInt(elemIdxBits.W)
  val instType = UInt(3.W)
  val vd_last_uop = Bool()
  val vd_first_uop = Bool()
}

class VecFlowBundle(implicit p: Parameters) extends VLSUBundleWithMicroOp {
  val vaddr             = UInt(VAddrBits.W)
  val mask              = UInt(VLENB.W)
  val alignedType       = UInt(alignTypeBits.W)
  val vecActive         = Bool()
  val elemIdx           = UInt(elemIdxBits.W)
  val is_first_ele      = Bool()

  // pack
  val isPackage         = Bool()
  val packageNum        = UInt((log2Up(VLENB) + 1).W)
  val originAlignedType = UInt(alignTypeBits.W)
}

class VecMemExuOutput(isVector: Boolean = false)(implicit p: Parameters) extends VLSUBundle {
  val output = new MemExuOutput(isVector)
  val vecFeedback = Bool()
  val mmio = Bool()
  val usSecondInv = Bool()
  val elemIdx = UInt(elemIdxBits.W)
  val alignedType = UInt(alignTypeBits.W)
  val mbIndex     = UInt(vsmBindexBits.W)
  val mask        = UInt(VLENB.W)
  val vaddr       = UInt(XLEN.W)
  val vaNeedExt   = Bool()
  val gpaddr      = UInt(GPAddrBits.W)
  val isForVSnonLeafPTE = Bool()
  val vecVaddrOffset = UInt(VAddrBits.W)
  val vecTriggerMask = UInt((VLEN/8).W)
}

object MulNum {
  def apply (mul: UInt): UInt = { // mul means emul or lmul
    (LookupTree(mul,List(
      "b101".U -> 1.U , // 1/8
      "b110".U -> 1.U , // 1/4
      "b111".U -> 1.U , // 1/2
      "b000".U -> 1.U , // 1
      "b001".U -> 2.U , // 2
      "b010".U -> 4.U , // 4
      "b011".U -> 8.U   // 8
    )))}
}
/**
  * When emul is greater than or equal to 1, the entire register needs to be written;
  * otherwise, only the specified number of bytes is written. */
object MulDataSize {
  def apply (mul: UInt): UInt = { // mul means emul or lmul
    (LookupTree(mul,List(
      "b101".U -> 2.U  , // 1/8
      "b110".U -> 4.U  , // 1/4
      "b111".U -> 8.U  , // 1/2
      "b000".U -> 16.U , // 1
      "b001".U -> 16.U , // 2
      "b010".U -> 16.U , // 4
      "b011".U -> 16.U   // 8
    )))}
}
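
// For reference: mul uses the vtype encoding, e.g. emul = b111 (1/2) gives
// MulNum = 1 register and MulDataSize = 8 valid bytes in that register.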

object OneRegNum {
  def apply (eew: UInt): UInt = { // number of elements that fit in one vector register, given eew
    require(eew.getWidth == 2, "The eew width must be 2.")
    (LookupTree(eew, List(
      "b00".U -> 16.U , // 1
      "b01".U ->  8.U , // 2
      "b10".U ->  4.U , // 4
      "b11".U ->  2.U   // 8
    )))}
}

// bytes read per element for indexed instructions
object SewDataSize {
  def apply (sew: UInt): UInt = {
    (LookupTree(sew,List(
      "b000".U -> 1.U , // 1
      "b001".U -> 2.U , // 2
      "b010".U -> 4.U , // 4
      "b011".U -> 8.U   // 8
    )))}
}

// bytes read per element for strided instructions
object EewDataSize {
  def apply (eew: UInt): UInt = {
    require(eew.getWidth == 2, "The eew width must be 2.")
    (LookupTree(eew, List(
      "b00".U -> 1.U , // 1
      "b01".U -> 2.U , // 2
      "b10".U -> 4.U , // 4
      "b11".U -> 8.U   // 8
    )))}
}

object loadDataSize {
  def apply (instType: UInt, emul: UInt, eew: UInt, sew: UInt): UInt = {
    (LookupTree(instType,List(
      "b000".U ->  MulDataSize(emul), // unit-stride
      "b010".U ->  EewDataSize(eew)  , // strided
      "b001".U ->  SewDataSize(sew)  , // indexed-unordered
      "b011".U ->  SewDataSize(sew)  , // indexed-ordered
      "b100".U ->  EewDataSize(eew)  , // segment unit-stride
      "b110".U ->  EewDataSize(eew)  , // segment strided
      "b101".U ->  SewDataSize(sew)  , // segment indexed-unordered
      "b111".U ->  SewDataSize(sew)    // segment indexed-ordered
    )))}
}

object storeDataSize {
  def apply (instType: UInt, eew: UInt, sew: UInt): UInt = {
    (LookupTree(instType,List(
      "b000".U ->  EewDataSize(eew)  , // unit-stride, do not use
      "b010".U ->  EewDataSize(eew)  , // strided
      "b001".U ->  SewDataSize(sew)  , // indexed-unordered
      "b011".U ->  SewDataSize(sew)  , // indexed-ordered
      "b100".U ->  EewDataSize(eew)  , // segment unit-stride
      "b110".U ->  EewDataSize(eew)  , // segment strided
      "b101".U ->  SewDataSize(sew)  , // segment indexed-unordered
      "b111".U ->  SewDataSize(sew)    // segment indexed-ordered
    )))}
}
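
// For reference: a strided access (instType = b010) with eew = b10 (32-bit)
// moves EewDataSize(eew) = 4 bytes per element, while indexed accesses
// (b001/b011) take their element size from sew via SewDataSize.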

/**
  * These are used to obtain the immediate address offsets for indexed instructions. */
object EewEq8 {
  def apply(index:UInt, flow_inner_idx: UInt): UInt = {
    (LookupTree(flow_inner_idx,List(
      0.U  -> index(7 ,0   ),
      1.U  -> index(15,8   ),
      2.U  -> index(23,16  ),
      3.U  -> index(31,24  ),
      4.U  -> index(39,32  ),
      5.U  -> index(47,40  ),
      6.U  -> index(55,48  ),
      7.U  -> index(63,56  ),
      8.U  -> index(71,64  ),
      9.U  -> index(79,72  ),
      10.U -> index(87,80  ),
      11.U -> index(95,88  ),
      12.U -> index(103,96 ),
      13.U -> index(111,104),
      14.U -> index(119,112),
      15.U -> index(127,120)
    )))}
}

object EewEq16 {
  def apply(index: UInt, flow_inner_idx: UInt): UInt = {
    (LookupTree(flow_inner_idx, List(
      0.U -> index(15, 0),
      1.U -> index(31, 16),
      2.U -> index(47, 32),
      3.U -> index(63, 48),
      4.U -> index(79, 64),
      5.U -> index(95, 80),
      6.U -> index(111, 96),
      7.U -> index(127, 112)
    )))}
}

object EewEq32 {
  def apply(index: UInt, flow_inner_idx: UInt): UInt = {
    (LookupTree(flow_inner_idx, List(
      0.U -> index(31, 0),
      1.U -> index(63, 32),
      2.U -> index(95, 64),
      3.U -> index(127, 96)
    )))}
}

object EewEq64 {
  def apply (index: UInt, flow_inner_idx: UInt): UInt = {
    (LookupTree(flow_inner_idx, List(
      0.U -> index(63, 0),
      1.U -> index(127, 64)
    )))}
}

object IndexAddr {
  def apply (index: UInt, flow_inner_idx: UInt, eew: UInt): UInt = {
    require(eew.getWidth == 2, "The eew width must be 2.")
    (LookupTree(eew, List(
      "b00".U -> EewEq8 (index = index, flow_inner_idx = flow_inner_idx ), // Imm is 1 Byte // TODO: index may cross registers
      "b01".U -> EewEq16(index = index, flow_inner_idx = flow_inner_idx ), // Imm is 2 Bytes
      "b10".U -> EewEq32(index = index, flow_inner_idx = flow_inner_idx ), // Imm is 4 Bytes
      "b11".U -> EewEq64(index = index, flow_inner_idx = flow_inner_idx )  // Imm is 8 Bytes
    )))}
}
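
// For reference: IndexAddr(index, flow_inner_idx = 2.U, eew = "b00".U)
// selects the third 1-byte offset, i.e. index(23, 16).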

object Log2Num {
  def apply (num: UInt): UInt = {
    (LookupTree(num,List(
      16.U -> 4.U,
      8.U  -> 3.U,
      4.U  -> 2.U,
      2.U  -> 1.U,
      1.U  -> 0.U
    )))}
}

object GenUopIdxInField {
  /**
   * Used for normal vector instructions.
   */
  def apply (instType: UInt, emul: UInt, lmul: UInt, uopIdx: UInt): UInt = {
    val isIndexed = instType(0)
    val mulInField = Mux(
      isIndexed,
      Mux(lmul.asSInt > emul.asSInt, lmul, emul),
      emul
    )
    LookupTree(mulInField, List(
      "b101".U -> 0.U,
      "b110".U -> 0.U,
      "b111".U -> 0.U,
      "b000".U -> 0.U,
      "b001".U -> uopIdx(0),
      "b010".U -> uopIdx(1, 0),
      "b011".U -> uopIdx(2, 0)
    ))
  }
  /**
   * Only used for segment instructions.
   */
  def apply (select: UInt, uopIdx: UInt): UInt = {
    LookupTree(select, List(
      "b101".U -> 0.U,
      "b110".U -> 0.U,
      "b111".U -> 0.U,
      "b000".U -> 0.U,
      "b001".U -> uopIdx(0),
      "b010".U -> uopIdx(1, 0),
      "b011".U -> uopIdx(2, 0)
    ))
  }
}

// eew decode
object EewLog2 extends VLSUConstants {
  // def apply (eew: UInt): UInt = {
  //   (LookupTree(eew,List(
  //     "b000".U -> "b000".U , // 1
  //     "b101".U -> "b001".U , // 2
  //     "b110".U -> "b010".U , // 4
  //     "b111".U -> "b011".U   // 8
  //   )))}
  def apply(eew: UInt): UInt = {
    require(eew.getWidth == 2, "The eew width must be 2.")
    ZeroExt(eew, ewBits)
  }
}

object GenRealFlowNum {
  /**
   * Unit-stride instructions do not use this method; other instructions
   * generate realFlowNum as EmulDataSize >> eew, where EmulDataSize is the
   * number of bytes that need to be written to the register and eew is the
   * log2 of the number of bytes written at once.
   *
   * @param instType As the name implies.
   * @param emul As the name implies.
   * @param lmul As the name implies.
   * @param eew As the name implies.
   * @param sew As the name implies.
   * @param isSegment Only modules related to segment need to set this to true.
   * @return FlowNum of the instruction.
   *
   */
  def apply (instType: UInt, emul: UInt, lmul: UInt, eew: UInt, sew: UInt, isSegment: Boolean = false): UInt = {
    require(instType.getWidth == 3, "The instType width must be 3, (isSegment, mop)")
    require(eew.getWidth == 2, "The eew width must be 2.")
    // The new SegmentUnit now handles segment instructions; the previous implementation is retained for the time being as a fallback.
    val segmentIndexFlowNum =  if (isSegment) (MulDataSize(lmul) >> sew(1,0)).asUInt
    else Mux(emul.asSInt > lmul.asSInt, (MulDataSize(emul) >> eew).asUInt, (MulDataSize(lmul) >> sew(1,0)).asUInt)
    (LookupTree(instType,List(
      "b000".U ->  (MulDataSize(emul) >> eew).asUInt, // used by stores; loads do not use this
      "b010".U ->  (MulDataSize(emul) >> eew).asUInt, // strided
      "b001".U ->  Mux(emul.asSInt > lmul.asSInt, (MulDataSize(emul) >> eew).asUInt, (MulDataSize(lmul) >> sew(1,0)).asUInt), // indexed-unordered
      "b011".U ->  Mux(emul.asSInt > lmul.asSInt, (MulDataSize(emul) >> eew).asUInt, (MulDataSize(lmul) >> sew(1,0)).asUInt), // indexed-ordered
      "b100".U ->  (MulDataSize(emul) >> eew).asUInt, // segment unit-stride
      "b110".U ->  (MulDataSize(emul) >> eew).asUInt, // segment strided
      "b101".U ->  segmentIndexFlowNum, // segment indexed-unordered
      "b111".U ->  segmentIndexFlowNum  // segment indexed-ordered
    )))}
}
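
// For reference: a strided instruction (instType = b010) with emul = b001 (2)
// and eew = b10 (32-bit) yields MulDataSize(emul) >> eew = 16 >> 2 = 4 flows.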

object GenRealFlowLog2 extends VLSUConstants {
  /**
   * GenRealFlowLog2 = Log2(GenRealFlowNum)
   *
   * @param instType As the name implies.
   * @param emul As the name implies.
   * @param lmul As the name implies.
   * @param eew As the name implies.
   * @param sew As the name implies.
   * @param isSegment Only modules related to segment need to set this to true.
   * @return FlowNumLog2 of the instruction.
   */
  def apply(instType: UInt, emul: UInt, lmul: UInt, eew: UInt, sew: UInt, isSegment: Boolean = false): UInt = {
    require(instType.getWidth == 3, "The instType width must be 3, (isSegment, mop)")
    require(eew.getWidth == 2, "The eew width must be 2.")
    val emulLog2 = Mux(emul.asSInt >= 0.S, 0.U, emul)
    val lmulLog2 = Mux(lmul.asSInt >= 0.S, 0.U, lmul)
    val eewRealFlowLog2 = emulLog2 + log2Up(VLENB).U - eew
    val sewRealFlowLog2 = lmulLog2 + log2Up(VLENB).U - sew(1, 0)
    // The new SegmentUnit now handles segment instructions; the previous implementation is retained for the time being as a fallback.
    val segmentIndexFlowLog2 = if (isSegment) sewRealFlowLog2 else Mux(emul.asSInt > lmul.asSInt, eewRealFlowLog2, sewRealFlowLog2)
    (LookupTree(instType, List(
      "b000".U -> eewRealFlowLog2, // unit-stride
      "b010".U -> eewRealFlowLog2, // strided
      "b001".U -> Mux(emul.asSInt > lmul.asSInt, eewRealFlowLog2, sewRealFlowLog2), // indexed-unordered
      "b011".U -> Mux(emul.asSInt > lmul.asSInt, eewRealFlowLog2, sewRealFlowLog2), // indexed-ordered
      "b100".U -> eewRealFlowLog2, // segment unit-stride
      "b110".U -> eewRealFlowLog2, // segment strided
      "b101".U -> segmentIndexFlowLog2, // segment indexed-unordered
      "b111".U -> segmentIndexFlowLog2  // segment indexed-ordered
    )))
  }
}

/**
  * GenElemIdx generates an element index within an instruction, given a certain uopIdx and a known flowIdx
  * inside the uop.
  */
object GenElemIdx extends VLSUConstants {
  def apply(instType: UInt, emul: UInt, lmul: UInt, eew: UInt, sew: UInt,
            uopIdx: UInt, flowIdx: UInt): UInt = {
    require(eew.getWidth == 2, "The eew width must be 2.")
    val isIndexed = instType(0).asBool
    val eewUopFlowsLog2 = Mux(emul.asSInt > 0.S, 0.U, emul) + log2Up(VLENB).U - eew
    val sewUopFlowsLog2 = Mux(lmul.asSInt > 0.S, 0.U, lmul) + log2Up(VLENB).U - sew(1, 0)
    val uopFlowsLog2 = Mux(
      isIndexed,
      Mux(emul.asSInt > lmul.asSInt, eewUopFlowsLog2, sewUopFlowsLog2),
      eewUopFlowsLog2
    )
    LookupTree(uopFlowsLog2, List(
      0.U -> uopIdx,
      1.U -> uopIdx ## flowIdx(0),
      2.U -> uopIdx ## flowIdx(1, 0),
      3.U -> uopIdx ## flowIdx(2, 0),
      4.U -> uopIdx ## flowIdx(3, 0)
    ))
  }
}

/**
  * GenVLMAX calculates VLMAX, which equals LMUL * VLEN / SEW.
  */
object GenVLMAXLog2 extends VLSUConstants {
  def apply(lmul: UInt, sew: UInt): UInt = lmul + log2Up(VLENB).U - sew
}
object GenVLMAX {
  def apply(lmul: UInt, sew: UInt): UInt = 1.U << GenVLMAXLog2(lmul, sew)
}
/**
 * Generate a mask based on vlmax.
 * Example: vlmax = b100 gives mask = b011.
 */
object GenVlMaxMask{
  def apply(vlmax: UInt, length: Int): UInt = (vlmax - 1.U)(length-1, 0)
}

object GenUSWholeRegVL extends VLSUConstants {
  def apply(nfields: UInt, eew: UInt): UInt = {
    require(eew.getWidth == 2, "The eew width must be 2.")
    LookupTree(eew, List(
      "b00".U -> (nfields << (log2Up(VLENB) - 0)),
      "b01".U -> (nfields << (log2Up(VLENB) - 1)),
      "b10".U -> (nfields << (log2Up(VLENB) - 2)),
      "b11".U -> (nfields << (log2Up(VLENB) - 3))
    ))
  }
}
object GenUSWholeEmul extends VLSUConstants {
  def apply(nf: UInt): UInt = {
    LookupTree(nf,List(
      "b000".U -> "b000".U(mulBits.W),
      "b001".U -> "b001".U(mulBits.W),
      "b011".U -> "b010".U(mulBits.W),
      "b111".U -> "b011".U(mulBits.W)
    ))
  }
}

object GenUSMaskRegVL extends VLSUConstants {
  def apply(vl: UInt): UInt = {
    Mux(vl(2,0) === 0.U , (vl >> 3.U), ((vl >> 3.U) + 1.U))
  }
}
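
// For reference: GenUSMaskRegVL computes ceil(vl / 8), e.g. vl = 17 gives 3.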

object GenUopByteMask {
  def apply(flowMask: UInt, alignedType: UInt): UInt = {
    LookupTree(alignedType, List(
      "b000".U -> flowMask,
      "b001".U -> FillInterleaved(2, flowMask),
      "b010".U -> FillInterleaved(4, flowMask),
      "b011".U -> FillInterleaved(8, flowMask),
      "b100".U -> FillInterleaved(16, flowMask)
    ))
  }
}

object GenVdIdxInField extends VLSUConstants {
  def apply(instType: UInt, emul: UInt, lmul: UInt, uopIdx: UInt): UInt = {
    val vdIdx = Wire(UInt(log2Up(maxMUL).W))
    when (instType(1,0) === "b00".U || instType(1,0) === "b10".U || lmul.asSInt > emul.asSInt) {
      // unit-stride or strided, or indexed with lmul > emul
      vdIdx := uopIdx
    }.otherwise {
      // indexed with lmul <= emul
      val multiple = emul - lmul
      val uopIdxWidth = uopIdx.getWidth
      vdIdx := LookupTree(multiple, List(
        0.U -> uopIdx,
        1.U -> (uopIdx >> 1),
        2.U -> (uopIdx >> 2),
        3.U -> (uopIdx >> 3)
      ))
    }
    vdIdx
  }
}
/**
 * Use start and vl to generate the flow active mask.
 * mod = true: keep bits where elementMask is 1;
 * mod = false: keep bits where elementMask is 0.
 * Bits outside [start, vl) are always cleared.
 */
object GenFlowMask extends VLSUConstants {
  def apply(elementMask: UInt, start: UInt, vl: UInt, mod: Boolean): UInt = {
    val startMask = ~UIntToMask(start, VLEN)
    val vlMask = UIntToMask(vl, VLEN)
    val maskVlStart = vlMask & startMask
    if(mod){
      elementMask & maskVlStart
    }
    else{
      (~elementMask).asUInt & maskVlStart
    }
  }
}
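
// For reference: start = 2 and vl = 6 keep only mask bits 5..2; mod = true
// returns elementMask restricted to that window, mod = false returns the
// complement of elementMask restricted to the same window.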

object genVWmask128 {
  def apply(addr: UInt, sizeEncode: UInt): UInt = {
    (LookupTree(sizeEncode, List(
      "b000".U -> 0x1.U, // 0001 << addr(3:0)
      "b001".U -> 0x3.U, // 0011
      "b010".U -> 0xf.U, // 1111
      "b011".U -> 0xff.U, // 11111111
      "b100".U -> 0xffff.U // 1111111111111111
    )) << addr(3, 0)).asUInt
  }
}
/*
 * Only used when the maximum access length is 128 bits.
 */
object genVWdata {
  def apply(data: UInt, sizeEncode: UInt): UInt = {
    LookupTree(sizeEncode, List(
      "b000".U -> Fill(16, data(7, 0)),
      "b001".U -> Fill(8, data(15, 0)),
      "b010".U -> Fill(4, data(31, 0)),
      "b011".U -> Fill(2, data(63,0)),
      "b100".U -> data(127,0)
    ))
  }
}

object genUSSplitAddr {
  def apply(addr: UInt, index: UInt, width: Int): UInt = {
    val tmpAddr = Cat(addr(width - 1, 4), 0.U(4.W)) // align addr down to 16 bytes
    val nextCacheline = tmpAddr + 16.U
    LookupTree(index, List(
      0.U -> tmpAddr,
      1.U -> nextCacheline
    ))
  }
}

object genUSSplitMask {
  def apply(mask: UInt, index: UInt): UInt = {
    require(mask.getWidth == 32) // must be 32 bits
    LookupTree(index, List(
      0.U -> mask(15, 0),
      1.U -> mask(31, 16)
    ))
  }
}

object genUSSplitData {
  def apply(data: UInt, index: UInt, addrOffset: UInt): UInt = {
    val tmpData = WireInit(0.U(256.W))
    val lookupTable = (0 until 16).map{case i =>
      if(i == 0){
        i.U -> Cat(0.U(128.W), data)
      }else{
        i.U -> Cat(0.U(((16-i)*8).W), data, 0.U((i*8).W))
      }
    }
    tmpData := LookupTree(addrOffset, lookupTable).asUInt

    LookupTree(index, List(
      0.U -> tmpData(127, 0),
      1.U -> tmpData(255, 128)
    ))
  }
}
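
// For reference: genUSSplitData shifts the 128-bit data left by addrOffset
// bytes within a 256-bit window (byte k of data lands at byte k + addrOffset),
// then index selects the low (0.U) or high (1.U) 128 bits.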

object genVSData extends VLSUConstants {
  def apply(data: UInt, elemIdx: UInt, alignedType: UInt): UInt = {
    LookupTree(alignedType, List(
      "b000".U -> ZeroExt(LookupTree(elemIdx(3, 0), List.tabulate(VLEN/8)(i => i.U -> getByte(data, i))), VLEN),
      "b001".U -> ZeroExt(LookupTree(elemIdx(2, 0), List.tabulate(VLEN/16)(i => i.U -> getHalfWord(data, i))), VLEN),
      "b010".U -> ZeroExt(LookupTree(elemIdx(1, 0), List.tabulate(VLEN/32)(i => i.U -> getWord(data, i))), VLEN),
      "b011".U -> ZeroExt(LookupTree(elemIdx(0), List.tabulate(VLEN/64)(i => i.U -> getDoubleWord(data, i))), VLEN),
      "b100".U -> data // elements wider than 128 bits are not supported
    ))
  }
}

// TODO: more elegant
object genVStride extends VLSUConstants {
  def apply(uopIdx: UInt, stride: UInt): UInt = {
    LookupTree(uopIdx, List(
      0.U -> 0.U,
      1.U -> stride,
      2.U -> (stride << 1),
      3.U -> ((stride << 1).asUInt + stride),
      4.U -> (stride << 2),
      5.U -> ((stride << 2).asUInt + stride),
      6.U -> ((stride << 2).asUInt + (stride << 1)),
      7.U -> ((stride << 2).asUInt + (stride << 1) + stride)
    ))
  }
}
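
// For reference: genVStride computes uopIdx * stride for uopIdx in 0~7 using
// shifts and adds, e.g. uopIdx = 5 yields (stride << 2) + stride.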
/**
 * Generate uopOffset; not used by segment instructions.
 */
object genVUopOffset extends VLSUConstants {
  def apply(instType: UInt, isfof: Bool, uopidx: UInt, nf: UInt, eew: UInt, stride: UInt, alignedType: UInt): UInt = {
    val uopInsidefield = (uopidx >> nf).asUInt // when nf == 0, this is uopidx

    val fofVUopOffset = (LookupTree(instType,List(
      "b000".U -> ( genVStride(uopInsidefield, stride) << (log2Up(VLENB).U - eew)   ) , // unit-stride fof
      "b100".U -> ( genVStride(uopInsidefield, stride) << (log2Up(VLENB).U - eew)   )   // segment unit-stride fof
    ))).asUInt

    val otherVUopOffset = (LookupTree(instType,List(
      "b000".U -> ( uopInsidefield << alignedType                                   ) , // unit-stride
      "b010".U -> ( genVStride(uopInsidefield, stride) << (log2Up(VLENB).U - eew)   ) , // strided
      "b001".U -> ( 0.U                                                             ) , // indexed-unordered
      "b011".U -> ( 0.U                                                             ) , // indexed-ordered
      "b100".U -> ( uopInsidefield << alignedType                                   ) , // segment unit-stride
      "b110".U -> ( genVStride(uopInsidefield, stride) << (log2Up(VLENB).U - eew)   ) , // segment strided
      "b101".U -> ( 0.U                                                             ) , // segment indexed-unordered
      "b111".U -> ( 0.U                                                             )   // segment indexed-ordered
    ))).asUInt

    Mux(isfof, fofVUopOffset, otherVUopOffset)
  }
}

object genVFirstUnmask extends VLSUConstants {
  /**
   * Find the index of the lowest set (unmasked) bit.
   * Example:
   *   mask = 16'b1111_1111_1110_0000
   *   returns 5
   * @param mask 16-bit mask.
   * @return index of the lowest set bit.
   */
  def apply(mask: UInt): UInt = {
    require(mask.getWidth == 16, "The mask width must be 16")
    val select = (0 until 16).zip(mask.asBools).map{case (i, v) =>
      (v, i.U)
    }
    PriorityMuxDefault(select, 0.U)
  }

  def apply(mask: UInt, regOffset: UInt): UInt = {
    require(mask.getWidth == 16, "The mask width must be 16")
    val realMask = (mask >> regOffset).asUInt
    val select = (0 until 16).zip(realMask.asBools).map{case (i, v) =>
      (v, i.U)
    }
    PriorityMuxDefault(select, 0.U)
  }
}

class skidBufferConnect[T <: Data](gen: T) extends Module {
  val io = IO(new Bundle() {
    val in = Flipped(DecoupledIO(gen.cloneType))
    val flush = Input(Bool())
    val out = DecoupledIO(gen.cloneType)
  })

  skidBuffer.connect(io.in, io.out, io.flush)
}

object skidBuffer{
  /*
   * Skid buffer used to break the timing path of ready.
   */
  def connect[T <: Data](
                          in: DecoupledIO[T],
                          out: DecoupledIO[T],
                          flush: Bool
                        ): T = {
    val empty :: skid :: Nil = Enum(2)
    val state      = RegInit(empty)
    val stateNext  = WireInit(empty)
    val dataBuffer = RegEnable(in.bits, (!out.ready && in.fire))

    when(state === empty){
      stateNext := Mux(!out.ready && in.fire && !flush, skid, empty)
    }.elsewhen(state === skid){
      stateNext := Mux(out.ready || flush, empty, skid)
    }
    state     := stateNext

    in.ready  := state === empty
    out.bits  := Mux(state === skid, dataBuffer, in.bits)
    out.valid := in.valid || (state === skid)

    dataBuffer
  }
  def apply[T <: Data](
                        in: DecoupledIO[T],
                        out: DecoupledIO[T],
                        flush: Bool,
                        moduleName: String
                      ): Unit = {
    val buffer = Module(new skidBufferConnect(in.bits))
    buffer.suggestName(moduleName)
    buffer.io.in <> in
    buffer.io.flush := flush
    out <> buffer.io.out
  }
}
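
/* Usage sketch (hypothetical signal names, for illustration only):
 *   skidBuffer(producer.io.out, consumer.io.in, io.flush, "vlSkidBuffer")
 * Because in.ready is driven only by the registered state, the downstream
 * ready no longer propagates combinationally to the upstream ready.
 */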