// XiangShan/src/main/scala/xiangshan/mem/vector/VSplit.scala (revision e8b68a8e55c66f0e214067411a892f85b725df09)
/***************************************************************************************
  * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
  * Copyright (c) 2020-2021 Peng Cheng Laboratory
  *
  * XiangShan is licensed under Mulan PSL v2.
  * You can use this software according to the terms and conditions of the Mulan PSL v2.
  * You may obtain a copy of Mulan PSL v2 at:
  *          http://license.coscl.org.cn/MulanPSL2
  *
  * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
  * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
  * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
  *
  * See the Mulan PSL v2 for more details.
  ***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan._
import xiangshan.backend.rob.RobPtr
import xiangshan.backend.Bundles._
import xiangshan.mem._
import xiangshan.backend.fu.vector.Bundles._


class VSplitPipeline(isVStore: Boolean = false)(implicit p: Parameters) extends VLSUModule{
  val io = IO(new VSplitPipelineIO(isVStore))
  // will be overridden later
  def us_whole_reg(fuOpType: UInt): Bool = false.B
  def us_mask(fuOpType: UInt): Bool = false.B
  def us_fof(fuOpType: UInt): Bool = false.B

  val vdIdxReg = RegInit(0.U(3.W))

  val s1_ready = WireInit(false.B)
  io.in.ready := s1_ready
  /**-----------------------------------------------------------
    * s0 stage
    * decode and generate AlignedType, uop mask, preIsSplit
    * ----------------------------------------------------------
    */
  val s0_vtype = io.in.bits.uop.vpu.vtype
  val s0_sew = s0_vtype.vsew
  val s0_eew = io.in.bits.uop.vpu.veew
  val s0_lmul = s0_vtype.vlmul
  // for whole-register accesses, emul is derived from nf; for unit-stride mask accesses, emul is 1
  val s0_fuOpType = io.in.bits.uop.fuOpType
  val s0_mop = s0_fuOpType(6, 5)
  val s0_nf = Mux(us_whole_reg(s0_fuOpType), 0.U, io.in.bits.uop.vpu.nf)
  val s0_vm = io.in.bits.uop.vpu.vm
  val s0_emul = Mux(us_whole_reg(s0_fuOpType), GenUSWholeEmul(io.in.bits.uop.vpu.nf), Mux(us_mask(s0_fuOpType), 0.U(mulBits.W), EewLog2(s0_eew) - s0_sew + s0_lmul))
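  // Per the RVV spec, EMUL = (EEW / SEW) * LMUL for non-whole-register accesses;
  // s0_emul above computes this in the log2 domain: log2(EMUL) = log2(EEW) - log2(SEW) + log2(LMUL).
  // Illustrative (assumed) example: an access with SEW=32, EEW=16, LMUL=4 gives
  // log2(EMUL) = log2(16) - log2(32) + log2(4) = -1 + 2 = 1, i.e. EMUL = 2.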
  val s0_preIsSplit = !(isUnitStride(s0_mop) && !us_fof(s0_fuOpType))
  val s0_nfield        = s0_nf +& 1.U

  val s0_valid         = Wire(Bool())
  val s0_kill          = io.in.bits.uop.robIdx.needFlush(io.redirect)
  val s0_can_go        = s1_ready
  val s0_fire          = s0_valid && s0_can_go
  val s0_out           = Wire(new VLSBundle(isVStore))

  val isUsWholeReg = isUnitStride(s0_mop) && us_whole_reg(s0_fuOpType)
  val isMaskReg = isUnitStride(s0_mop) && us_mask(s0_fuOpType)
  val isSegment = s0_nf =/= 0.U && !us_whole_reg(s0_fuOpType)
  val instType = Cat(isSegment, s0_mop)
  val uopIdx = io.in.bits.uop.vpu.vuopIdx
  val uopIdxInField = GenUopIdxInField(instType, s0_emul, s0_lmul, uopIdx)
  val vdIdxInField = GenVdIdxInField(instType, s0_emul, s0_lmul, uopIdxInField)
  val lmulLog2 = Mux(s0_lmul.asSInt >= 0.S, 0.U, s0_lmul)
  val emulLog2 = Mux(s0_emul.asSInt >= 0.S, 0.U, s0_emul)
  val numEewLog2 = emulLog2 - EewLog2(s0_eew)
  val numSewLog2 = lmulLog2 - s0_sew
  val numFlowsSameVdLog2 = Mux(
    isIndexed(instType),
    log2Up(VLENB).U - s0_sew(1,0),
    log2Up(VLENB).U - s0_eew(1,0)
  )
  // numUops = nf * max(lmul, emul)
  val lmulLog2Pos = Mux(s0_lmul.asSInt < 0.S, 0.U, s0_lmul)
  val emulLog2Pos = Mux(s0_emul.asSInt < 0.S, 0.U, s0_emul)
  val numUops = Mux(
    isIndexed(s0_mop) && s0_lmul.asSInt > s0_emul.asSInt,
    (s0_nf +& 1.U) << lmulLog2Pos,
    (s0_nf +& 1.U) << emulLog2Pos
  )
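  // Illustrative (assumed) example for the formula above: a two-field segment
  // indexed load (nf = 1, so nf + 1 = 2 fields) with LMUL = 4 and EMUL = 2 uses
  // max(LMUL, EMUL) = 4 register groups per field, hence numUops = 2 << 2 = 8.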

  val vvl = io.in.bits.src_vl.asTypeOf(VConfig()).vl
  val evl = Mux(isUsWholeReg,
                GenUSWholeRegVL(io.in.bits.uop.vpu.nf +& 1.U, s0_eew),
                Mux(isMaskReg,
                    GenUSMaskRegVL(vvl),
                    vvl))
  val vvstart = io.in.bits.uop.vpu.vstart
  val alignedType = Mux(isIndexed(instType), s0_sew(1, 0), s0_eew(1, 0))
  val broadenAlignedType = Mux(s0_preIsSplit, Cat("b0".U, alignedType), "b100".U) // if unit-stride, use 128-bit memory accesses
  val flowsLog2 = GenRealFlowLog2(instType, s0_emul, s0_lmul, s0_eew, s0_sew)
  val flowsPrevThisUop = uopIdxInField << flowsLog2 // # of flows before this uop in a field
  val flowsPrevThisVd = vdIdxInField << numFlowsSameVdLog2 // # of flows before this vd in a field
  val flowsIncludeThisUop = (uopIdxInField +& 1.U) << flowsLog2 // # of flows in a field up to and including this uop
  val flowNum = io.in.bits.flowNum.get
  val srcMask = GenFlowMask(Mux(s0_vm, Fill(VLEN, 1.U(1.W)), io.in.bits.src_mask), vvstart, evl, true)

  val flowMask = ((srcMask &
    UIntToMask(flowsIncludeThisUop.asUInt, VLEN + 1) &
    (~UIntToMask(flowsPrevThisUop.asUInt, VLEN)).asUInt
  ) >> flowsPrevThisVd)(VLENB - 1, 0)
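  // flowMask selects, out of the source mask, the flow bits that belong to this
  // uop (the window between flowsPrevThisUop and flowsIncludeThisUop) and then
  // shifts them down so bit 0 lines up with the first flow of the destination vd.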
  val vlmax = GenVLMAX(s0_lmul, s0_sew)

  // connect
  s0_out := DontCare
  s0_out match {case x =>
    x.uop := io.in.bits.uop
    x.uop.vpu.vl := evl
    x.uop.uopIdx := uopIdx
    x.uop.numUops := numUops
    x.uop.lastUop := (uopIdx +& 1.U) === numUops
    x.uop.vpu.nf  := s0_nf
    x.flowMask := flowMask
    x.byteMask := GenUopByteMask(flowMask, Cat("b0".U, alignedType))(VLENB - 1, 0)
    x.fof := isUnitStride(s0_mop) && us_fof(s0_fuOpType)
    x.baseAddr := io.in.bits.src_rs1
    x.stride := io.in.bits.src_stride
    x.flowNum := flowNum
    x.nfields := s0_nfield
    x.vm := s0_vm
    x.usWholeReg := isUsWholeReg
    x.usMaskReg := isMaskReg
    x.eew := s0_eew
    x.sew := s0_sew
    x.emul := s0_emul
    x.lmul := s0_lmul
    x.vlmax := Mux(isUsWholeReg, evl, vlmax)
    x.instType := instType
    x.data := io.in.bits.src_vs3
    x.vdIdxInField := vdIdxInField
    x.preIsSplit  := s0_preIsSplit
    x.alignedType := broadenAlignedType
  }
  s0_valid := io.in.valid && !s0_kill
  /**-------------------------------------
    * s1 stage
    * ------------------------------------
    * generate UopOffset
    */
  val s1_valid         = RegInit(false.B)
  val s1_kill          = Wire(Bool())
  val s1_in            = Wire(new VLSBundle(isVStore))
  val s1_can_go        = io.out.ready && io.toMergeBuffer.resp.valid
  val s1_fire          = s1_valid && !s1_kill && s1_can_go

  s1_ready         := s1_kill || !s1_valid || io.out.ready && io.toMergeBuffer.resp.valid

  when(s0_fire){
    s1_valid := true.B
  }.elsewhen(s1_fire){
    s1_valid := false.B
  }.elsewhen(s1_kill){
    s1_valid := false.B
  }
  s1_in := RegEnable(s0_out, s0_fire)

  val s1_flowNum          = s1_in.flowNum
  val s1_uopidx           = s1_in.uop.vpu.vuopIdx
  val s1_nf               = s1_in.uop.vpu.nf
  val s1_nfields          = s1_in.nfields
  val s1_eew              = s1_in.eew
  val s1_instType         = s1_in.instType
  val s1_stride           = s1_in.stride
  val s1_vmask            = FillInterleaved(8, s1_in.byteMask)(VLEN-1, 0)
  val s1_alignedType      = s1_in.alignedType
  val s1_notIndexedStride = Mux( // stride for strided/unit-stride instructions
    isStrided(s1_instType),
    s1_stride(XLEN - 1, 0), // for strided accesses, stride = x[rs2]
    s1_nfields << s1_eew(1, 0) // for unit-stride accesses, stride = eew * NFIELDS
  )

  val stride     = Mux(isIndexed(s1_instType), s1_stride, s1_notIndexedStride).asUInt // for indexed instructions this carries the index, which is applied during splitting
  val uopOffset  = genVUopOffset(s1_instType, s1_uopidx, s1_nf, s1_eew(1, 0), stride, s1_alignedType)
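  // For segment accesses the fields of one element sit back to back in memory, so
  // consecutive elements of a single field are NFIELDS * EEW bytes apart; that is
  // why s1_notIndexedStride uses nfields << eew for the unit-stride case above.
  // Illustrative (assumed) example: vlseg3e16.v has NFIELDS = 3 and EEW = 2 bytes,
  // so successive elements of field 0 are 6 bytes apart.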

  s1_kill               := s1_in.uop.robIdx.needFlush(io.redirect)

  // query mergeBuffer
  io.toMergeBuffer.req.valid             := s1_fire // only request an entry when s1 can actually go
  io.toMergeBuffer.req.bits.flowNum      := Mux(s1_in.preIsSplit, PopCount(s1_in.flowMask), s1_flowNum)
  io.toMergeBuffer.req.bits.data         := s1_in.data
  io.toMergeBuffer.req.bits.uop          := s1_in.uop
  io.toMergeBuffer.req.bits.mask         := s1_in.flowMask
  io.toMergeBuffer.req.bits.vaddr        := DontCare
  io.toMergeBuffer.req.bits.vdIdx        := vdIdxReg
  io.toMergeBuffer.req.bits.fof          := s1_in.fof
  io.toMergeBuffer.req.bits.vlmax        := s1_in.vlmax
//   io.toMergeBuffer.req.bits.vdOffset :=

  when (s1_in.uop.lastUop && s1_fire || s1_kill) {
    vdIdxReg := 0.U
  }.elsewhen(s1_fire) {
    vdIdxReg := vdIdxReg + 1.U
    XSError(vdIdxReg + 1.U === 0.U, s"Overflow! The number of vd should be less than 8\n")
  }
  // out connect
  io.out.valid          := s1_valid && io.toMergeBuffer.resp.valid
  io.out.bits           := s1_in
  io.out.bits.uopOffset := uopOffset
  io.out.bits.stride    := stride
  io.out.bits.mBIndex   := io.toMergeBuffer.resp.bits.mBIndex

  XSPerfAccumulate("split_out",     io.out.fire)
  XSPerfAccumulate("pipe_block",    io.out.valid && !io.out.ready)
  XSPerfAccumulate("mbuffer_block", s1_valid && io.out.ready && !io.toMergeBuffer.resp.valid)
}

abstract class VSplitBuffer(isVStore: Boolean = false)(implicit p: Parameters) extends VLSUModule{
  val io = IO(new VSplitBufferIO(isVStore))

  val bufferSize: Int

  class VSplitPtr(implicit p: Parameters) extends CircularQueuePtr[VSplitPtr](bufferSize){
  }

  object VSplitPtr {
    def apply(f: Bool, v: UInt)(implicit p: Parameters): VSplitPtr = {
      val ptr = Wire(new VSplitPtr)
      ptr.flag := f
      ptr.value := v
      ptr
    }
  }

  val uopq = Reg(Vec(bufferSize, new VLSBundle(isVStore)))
  val valid = RegInit(VecInit(Seq.fill(bufferSize)(false.B)))
  val srcMaskVec = Reg(Vec(bufferSize, UInt(VLEN.W)))
  // ptr
  val enqPtr = RegInit(0.U.asTypeOf(new VSplitPtr))
  val deqPtr = RegInit(0.U.asTypeOf(new VSplitPtr))
  // for split
  val splitIdx = RegInit(0.U(flowIdxBits.W))
  val strideOffsetReg = RegInit(0.U(VLEN.W))

  /**
    * Redirect
    */
  val flushed = WireInit(VecInit(Seq.fill(bufferSize)(false.B))) // entry was already flushed by a redirect that arrived in the previous cycle
  val flushVec = (valid zip flushed).zip(uopq).map { case ((v, f), entry) => v && entry.uop.robIdx.needFlush(io.redirect) && !f }
  val flushEnq = io.in.fire && io.in.bits.uop.robIdx.needFlush(io.redirect)
  val flushNumReg = RegNext(PopCount(flushEnq +: flushVec))
  val redirectReg = RegNext(io.redirect)
  val flushVecReg = RegNext(WireInit(VecInit(flushVec)))
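  // Flush is a two-cycle protocol: in the redirect cycle, flushVec marks the
  // entries to kill (plus a possibly in-flight enqueue); one cycle later the
  // registered copies (flushVecReg / flushNumReg / redirectReg) invalidate those
  // entries and roll enqPtr back by flushNumReg (see the enqPtr update below).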

  // enqueue; if redirected, the entry will be flushed next cycle
  when (io.in.fire) {
    val id = enqPtr.value
    uopq(id) := io.in.bits
    valid(id) := true.B
  }
  io.in.ready := isNotBefore(enqPtr, deqPtr)

  // split uops
  val issueValid       = valid(deqPtr.value)
  val issueEntry       = uopq(deqPtr.value)
  val issueMbIndex     = issueEntry.mBIndex
  val issueFlowNum     = issueEntry.flowNum
  val issueBaseAddr    = issueEntry.baseAddr
  val issueUop         = issueEntry.uop
  val issueUopIdx      = issueUop.vpu.vuopIdx
  val issueInstType    = issueEntry.instType
  val issueUopOffset   = issueEntry.uopOffset
  val issueEew         = issueEntry.eew
  val issueSew         = issueEntry.sew
  val issueLmul        = issueEntry.lmul
  val issueEmul        = issueEntry.emul
  val issueAlignedType = issueEntry.alignedType
  val issuePreIsSplit  = issueEntry.preIsSplit
  val issueByteMask    = issueEntry.byteMask
  val issueVLMAXMask   = issueEntry.vlmax - 1.U
  val issueIsWholeReg  = issueEntry.usWholeReg
  val issueVLMAXLog2 = GenVLMAXLog2(issueEntry.lmul, issueSew)
  val elemIdx = GenElemIdx(
    instType = issueInstType,
    emul = issueEmul,
    lmul = issueLmul,
    eew = issueEew,
    sew = issueSew,
    uopIdx = issueUopIdx,
    flowIdx = splitIdx
  ) // elemIdx inside an instruction, used for exception reporting

  val elemIdxInsideField = elemIdx & issueVLMAXMask
  val indexFlowInnerIdx = ((elemIdxInsideField << issueEew(1, 0))(vOffsetBits - 1, 0) >> issueEew(1, 0)).asUInt
  val nfIdx = Mux(issueIsWholeReg, 0.U, elemIdx >> issueVLMAXLog2)
  val fieldOffset = nfIdx << issueAlignedType // field offset inside a segment
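  // For segment accesses, elemIdx counts across all fields, so the field index is
  // elemIdx / VLMAX (elemIdx >> issueVLMAXLog2 above) and the byte offset of that
  // field inside one segment is nfIdx * element-size (nfIdx << issueAlignedType).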

  val indexedStride    = IndexAddr( // index for indexed instructions
    index = issueEntry.stride,
    flow_inner_idx = indexFlowInnerIdx,
    eew = issueEew
  )
  val issueStride = Mux(isIndexed(issueInstType), indexedStride, strideOffsetReg)
  val vaddr = issueBaseAddr + issueUopOffset + issueStride
  val mask = genVWmask128(vaddr, issueAlignedType) // scalar mask for the flow
  val flowMask = issueEntry.flowMask
  val vecActive = (flowMask & UIntToOH(splitIdx)).orR
  /*
   * Unit-stride is split into one flow or two flows.
   * If the uop's address is 128-bit aligned, split it into one flow; otherwise split it into two.
   */

  val usAligned128     = (vaddr(3,0) === 0.U) // addr is 128-bit aligned
  val usSplitMask      = genUSSplitMask(issueByteMask, splitIdx, vaddr(3,0))
  val usNoSplit        = (usAligned128 || !(vaddr(3,0) +& PopCount(usSplitMask))(4)) && !issuePreIsSplit && (splitIdx === 0.U) // unit-stride uop that does not need to be split into two flows
  val usSplitVaddr     = genUSSplitAddr(vaddr, splitIdx)
  val regOffset        = vaddr(3,0) // offset in the 256-bit vd
  XSError((splitIdx > 1.U && usNoSplit) || (splitIdx > 1.U && !issuePreIsSplit), "Unit-Stride addr split error!\n")
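  // Illustrative (assumed) example of the usNoSplit condition above: a unit-stride
  // flow at a vaddr with offset 0x4 and all 16 bytes of the split mask active gives
  // 0x4 + 16 = 20, whose bit 4 is set, so the access crosses a 16-byte boundary and
  // is issued as two flows; with offset 0x0 (usAligned128), one flow suffices.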

  // data
  io.out.bits match { case x =>
    x.uop                   := issueUop
    x.vaddr                 := Mux(!issuePreIsSplit, usSplitVaddr, vaddr)
    x.alignedType           := issueAlignedType
    x.isvec                 := true.B
    x.mask                  := Mux(!issuePreIsSplit, usSplitMask, mask)
    x.reg_offset            := regOffset // for merging unit-stride data
    x.vecActive             := vecActive
    x.is_first_ele          := DontCare
    x.usSecondInv           := usNoSplit
    x.elemIdx               := elemIdx
    x.elemIdxInsideVd       := splitIdx // for unit-stride, this is the index of the two split memory requests (used to merge data)
    x.uop_unit_stride_fof   := DontCare
    x.isFirstIssue          := DontCare
    x.mBIndex               := issueMbIndex
  }

  // update enqPtr
  when (redirectReg.valid && flushNumReg =/= 0.U) {
    enqPtr := enqPtr - flushNumReg
  }.otherwise {
    when (io.in.fire) {
      enqPtr := enqPtr + 1.U
    }
  }

  // flush queue
  for (i <- 0 until bufferSize) {
    when(flushVecReg(i) && redirectReg.valid && flushNumReg =/= 0.U) {
      valid(i) := false.B
      flushed(i) := true.B
    }
  }

  /* Execute logic */
  /** Issue to scalar pipeline **/
  val canIssue = Wire(Bool())
  val allowIssue = io.out.ready
  val activeIssue = Wire(Bool())
  val deqValid = valid(deqPtr.value)
  val inActiveIssue = deqValid && canIssue && !vecActive && issuePreIsSplit
  val issueCount = Mux(usNoSplit, 2.U, (PopCount(inActiveIssue) + PopCount(activeIssue))) // a unit-stride uop that needs no splitting still counts as two issued flows
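  // issueCount is how far splitIdx advances this cycle: 0 or 1 for ordinary flows
  // (active or masked-off), and 2 for an unsplit unit-stride uop, since its single
  // 128-bit access stands in for both potential split flows.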

  // handshake
  val thisPtr = deqPtr.value
  canIssue := !issueUop.robIdx.needFlush(io.redirect) &&
              !issueUop.robIdx.needFlush(redirectReg) &&
              deqPtr < enqPtr
  activeIssue := canIssue && allowIssue && (vecActive || !issuePreIsSplit) // active issue; currently only used for non-unit-stride
  when (!RegNext(io.redirect.valid) || distanceBetween(enqPtr, deqPtr) > flushNumReg) {
    when ((splitIdx < (issueFlowNum - issueCount))) {
      when (activeIssue || inActiveIssue) {
        // The uop has not been entirely split yet
        splitIdx := splitIdx + issueCount
        strideOffsetReg := Mux(!issuePreIsSplit, strideOffsetReg, strideOffsetReg + issueEntry.stride) // for plain unit-stride, strideOffsetReg is not used
      }
    }.otherwise {
      when (activeIssue || inActiveIssue) {
        // The uop is done splitting
        splitIdx := 0.U(flowIdxBits.W) // initialize flowIdx
        valid(deqPtr.value) := false.B
        strideOffsetReg := 0.U
        deqPtr := deqPtr + 1.U
      }
    }
  }.otherwise {
    splitIdx := 0.U(flowIdxBits.W) // initialize flowIdx
    strideOffsetReg := 0.U
  }

  // out connect
  io.out.valid := canIssue && (vecActive || !issuePreIsSplit) // TODO: inactive uops should not be sent to the pipeline

  XSPerfAccumulate("out_valid",             io.out.valid)
  XSPerfAccumulate("out_fire",              io.out.fire)
  XSPerfAccumulate("out_fire_unitstride",   io.out.fire && !issuePreIsSplit)
  XSPerfAccumulate("unitstride_vlenAlign",  io.out.fire && !issuePreIsSplit && io.out.bits.vaddr(3, 0) === 0.U)
  XSPerfAccumulate("unitstride_invalid",    io.out.ready && canIssue && !issuePreIsSplit && PopCount(io.out.bits.mask).orR)

  QueuePerf(bufferSize, distanceBetween(enqPtr, deqPtr), !io.in.ready)
}

class VSSplitBufferImp(implicit p: Parameters) extends VSplitBuffer(isVStore = true){
  override lazy val bufferSize = SplitBufferSize
  // split data
  val splitData = genVSData(
        data = issueEntry.data.asUInt,
        elemIdx = splitIdx,
        alignedType = issueAlignedType
      )
  val flowData = genVWdata(splitData, issueAlignedType)
  val usSplitData      = genUSSplitData(issueEntry.data.asUInt, splitIdx, vaddr(3,0))
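  // The store data follows the same split as the address: element-wise splitting
  // picks one element out of vs3 (genVSData/genVWdata), while the unit-stride path
  // (usSplitData) slices the 128-bit data at the same 16-byte offset used for
  // usSplitVaddr, so data and address stay paired per flow.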

  val sqIdx = issueUop.sqIdx + splitIdx
  io.out.bits.uop.sqIdx := sqIdx

  // send data to sq
  val vstd = io.vstd.get
  vstd.valid := canIssue
  vstd.bits.uop := issueUop
  vstd.bits.uop.sqIdx := sqIdx
  vstd.bits.data := Mux(!issuePreIsSplit, usSplitData, flowData)
  vstd.bits.debug := DontCare
  vstd.bits.vdIdx.get := DontCare
  vstd.bits.vdIdxInField.get := DontCare
  vstd.bits.mask.get := Mux(!issuePreIsSplit, usSplitMask, mask)

}

class VLSplitBufferImp(implicit p: Parameters) extends VSplitBuffer(isVStore = false){
  override lazy val bufferSize = SplitBufferSize
  io.out.bits.uop.lqIdx := issueUop.lqIdx + splitIdx
}

class VSSplitPipelineImp(implicit p: Parameters) extends VSplitPipeline(isVStore = true){
  override def us_whole_reg(fuOpType: UInt): Bool = fuOpType === VstuType.vsr
  override def us_mask(fuOpType: UInt): Bool      = fuOpType === VstuType.vsm
  override def us_fof(fuOpType: UInt): Bool       = false.B // there is no vector fof store
}

class VLSplitPipelineImp(implicit p: Parameters) extends VSplitPipeline(isVStore = false){

  override def us_whole_reg(fuOpType: UInt): Bool = fuOpType === VlduType.vlr
  override def us_mask(fuOpType: UInt): Bool      = fuOpType === VlduType.vlm
  override def us_fof(fuOpType: UInt): Bool       = fuOpType === VlduType.vleff
}

class VLSplitImp(implicit p: Parameters) extends VLSUModule{
  val io = IO(new VSplitIO(isVStore=false))
  val splitPipeline = Module(new VLSplitPipelineImp())
  val splitBuffer = Module(new VLSplitBufferImp())
  // Split Pipeline
  splitPipeline.io.in <> io.in
  splitPipeline.io.redirect <> io.redirect
  io.toMergeBuffer <> splitPipeline.io.toMergeBuffer

  // Split Buffer
  splitBuffer.io.in <> splitPipeline.io.out
  splitBuffer.io.redirect <> io.redirect
  io.out <> splitBuffer.io.out
}

class VSSplitImp(implicit p: Parameters) extends VLSUModule{
  val io = IO(new VSplitIO(isVStore=true))
  val splitPipeline = Module(new VSSplitPipelineImp())
  val splitBuffer = Module(new VSSplitBufferImp())
  // Split Pipeline
  splitPipeline.io.in <> io.in
  splitPipeline.io.redirect <> io.redirect
  io.toMergeBuffer <> splitPipeline.io.toMergeBuffer

  // Split Buffer
  splitBuffer.io.in <> splitPipeline.io.out
  splitBuffer.io.redirect <> io.redirect
  io.out <> splitBuffer.io.out
  io.vstd.get <> splitBuffer.io.vstd.get
}