/***************************************************************************************
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 * http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan._
import xiangshan.backend.rob.RobPtr
import xiangshan.backend.Bundles._
import xiangshan.mem._
import xiangshan.backend.fu.vector.Bundles._


class VSplitPipeline(isVStore: Boolean = false)(implicit p: Parameters) extends VLSUModule {
  val io = IO(new VSplitPipelineIO(isVStore))
  // will be overridden later
  def us_whole_reg(fuOpType: UInt): Bool = false.B
  def us_mask(fuOpType: UInt): Bool = false.B
  def us_fof(fuOpType: UInt): Bool = false.B
  // TODO: vdIdxReg should no longer be useful; don't delete it for now
  val vdIdxReg = RegInit(0.U(3.W))

  val s1_ready = WireInit(false.B)
  io.in.ready := s1_ready

  /**-----------------------------------------------------------
   * s0 stage
   * decode and generate alignedType, uop mask, preIsSplit
   * ----------------------------------------------------------
   */
  val s0_vtype = io.in.bits.uop.vpu.vtype
  val s0_sew = s0_vtype.vsew
  val s0_eew = io.in.bits.uop.vpu.veew
  val s0_lmul = s0_vtype.vlmul
  // for whole-register loads and unit-stride masked accesses, emul should be 1
  val s0_fuOpType = io.in.bits.uop.fuOpType
  val s0_mop = s0_fuOpType(6, 5)
  val s0_nf = Mux(us_whole_reg(s0_fuOpType), 0.U, io.in.bits.uop.vpu.nf)
  val s0_vm = io.in.bits.uop.vpu.vm
  val s0_emul = Mux(us_whole_reg(s0_fuOpType), GenUSWholeEmul(io.in.bits.uop.vpu.nf), Mux(us_mask(s0_fuOpType), 0.U(mulBits.W), EewLog2(s0_eew) - s0_sew + s0_lmul))
  val s0_preIsSplit = !(isUnitStride(s0_mop) && !us_fof(s0_fuOpType))
  val s0_nfield = s0_nf +& 1.U

  val s0_valid = Wire(Bool())
  val s0_kill = io.in.bits.uop.robIdx.needFlush(io.redirect)
  val s0_can_go = s1_ready
  val s0_fire = s0_valid && s0_can_go
  val s0_out = Wire(new VLSBundle(isVStore))

  val isUsWholeReg = isUnitStride(s0_mop) && us_whole_reg(s0_fuOpType)
  val isMaskReg = isUnitStride(s0_mop) && us_mask(s0_fuOpType)
  val isSegment = s0_nf =/= 0.U && !us_whole_reg(s0_fuOpType)
  val instType = Cat(isSegment, s0_mop)
  val uopIdx = io.in.bits.uop.vpu.vuopIdx
  val uopIdxInField = GenUopIdxInField(instType, s0_emul, s0_lmul, uopIdx)
  val vdIdxInField = GenVdIdxInField(instType, s0_emul, s0_lmul, uopIdxInField)
  val lmulLog2 = Mux(s0_lmul.asSInt >= 0.S, 0.U, s0_lmul)
  val emulLog2 = Mux(s0_emul.asSInt >= 0.S, 0.U, s0_emul)
  val numEewLog2 = emulLog2 - EewLog2(s0_eew)
  val numSewLog2 = lmulLog2 - s0_sew
  val numFlowsSameVdLog2 = Mux(
    isIndexed(instType),
    log2Up(VLENB).U - s0_sew(1, 0),
    log2Up(VLENB).U - s0_eew(1, 0)
  )
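  // Worked example (illustrative; assumes VLEN = 128, i.e. VLENB = 16):
  // for vluxei16.v under vtype e8,m1 we have sew = e8, eew = e16 and lmul = m1,
  // so s0_emul = EewLog2(eew) - sew + lmul corresponds to EMUL = 2 (two index
  // registers per data register). Being indexed, the flows sharing one vd follow
  // sew: numFlowsSameVdLog2 = log2(16) - 0 = 4, i.e. 16 elements per vd, and the
  // numUops computation below yields (nf + 1) << 1 = 2 uops.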
  // numUops = nf * max(lmul, emul)
  val lmulLog2Pos = Mux(s0_lmul.asSInt < 0.S, 0.U, s0_lmul)
  val emulLog2Pos = Mux(s0_emul.asSInt < 0.S, 0.U, s0_emul)
  val numUops = Mux(
    isIndexed(s0_mop) && s0_lmul.asSInt > s0_emul.asSInt,
    (s0_nf +& 1.U) << lmulLog2Pos,
    (s0_nf +& 1.U) << emulLog2Pos
  )

  val vvl = io.in.bits.src_vl.asTypeOf(VConfig()).vl
  val evl = Mux(isUsWholeReg,
    GenUSWholeRegVL(io.in.bits.uop.vpu.nf +& 1.U, s0_eew),
    Mux(isMaskReg,
      GenUSMaskRegVL(vvl),
      vvl))
  val vvstart = io.in.bits.uop.vpu.vstart
  val alignedType = Mux(isIndexed(instType), s0_sew(1, 0), s0_eew(1, 0))
  val broadenAlignedType = Mux(s0_preIsSplit, Cat("b0".U, alignedType), "b100".U) // for unit-stride, use 128-bit memory accesses
  val flowsLog2 = GenRealFlowLog2(instType, s0_emul, s0_lmul, s0_eew, s0_sew)
  val flowsPrevThisUop = (uopIdxInField << flowsLog2).asUInt // # of flows before this uop in a field
  val flowsPrevThisVd = (vdIdxInField << numFlowsSameVdLog2).asUInt // # of flows before this vd in a field
  val flowsIncludeThisUop = ((uopIdxInField +& 1.U) << flowsLog2).asUInt // # of flows up to and including this uop in a field
  val flowNum = io.in.bits.flowNum.get

  // For vector indexed instructions:
  // When emul is greater than lmul, multiple uops correspond to one vd, e.g.:
  //   vsetvli t1,t0,e8,m1,ta,ma    lmul = 1
  //   vluxei16.v v2,(a0),v8        emul = 2
  // In this case the flow mask must be right-shifted by flowsPrevThisUop, whereas the
  // mask passed to the merge buffer is right-shifted by flowsPrevThisVd, e.g.:
  //   vl = 9, srcMask = 0x1FF
  //   uopIdxInField = 0 and vdIdxInField = 0, flowMask = 0x00FF, toMergeBuffMask = 0x01FF
  //   uopIdxInField = 1 and vdIdxInField = 0, flowMask = 0x0001, toMergeBuffMask = 0x01FF
  //   uopIdxInField = 2 and vdIdxInField = 1, flowMask = 0x0000, toMergeBuffMask = 0x0000
  //   uopIdxInField = 3 and vdIdxInField = 1, flowMask = 0x0000, toMergeBuffMask = 0x0000
  val isSpecialIndexed = isIndexed(instType) && s0_emul.asSInt > s0_lmul.asSInt

  val srcMask = GenFlowMask(Mux(s0_vm, Fill(VLEN, 1.U(1.W)), io.in.bits.src_mask), vvstart, evl, true)
  val srcMaskShiftBits = Mux(isSpecialIndexed, flowsPrevThisUop, flowsPrevThisVd)

  val flowMask = ((srcMask &
    UIntToMask(flowsIncludeThisUop.asUInt, VLEN + 1) &
    (~UIntToMask(flowsPrevThisUop.asUInt, VLEN)).asUInt
  ) >> srcMaskShiftBits)(VLENB - 1, 0)
  val indexedSrcMask = (srcMask >> flowsPrevThisVd).asUInt // only for indexed instructions
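  // Mask-window arithmetic above, worked through (illustrative; continues the
  // vluxei16.v example: vl = 9, srcMask = 0x1FF, 8 flows per uop so flowsLog2 = 3):
  // for uopIdxInField = 1, flowsPrevThisUop = 8 and flowsIncludeThisUop = 16, so
  //   srcMask & UIntToMask(16, VLEN + 1) & ~UIntToMask(8, VLEN) = 0x100,
  // and shifting right by srcMaskShiftBits = 8 (special indexed case) gives
  // flowMask = 0x0001, matching the table above.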
  // Used to calculate the element index.
  // See 'splitbuffer' for 'io.out.splitIdxOffset' and 'mergebuffer' for 'merge data'
  val indexedSplitOffset = Mux(isSpecialIndexed, flowsPrevThisUop - flowsPrevThisVd, 0.U) // only for indexed instructions with emul > lmul
  val vlmax = GenVLMAX(s0_lmul, s0_sew)

  // connect
  s0_out := DontCare
  s0_out match { case x =>
    x.uop := io.in.bits.uop
    x.uop.vpu.vl := evl
    x.uop.uopIdx := uopIdx
    x.uop.numUops := numUops
    x.uop.lastUop := (uopIdx +& 1.U) === numUops
    x.uop.vpu.nf := s0_nf
    x.flowMask := flowMask
    x.indexedSrcMask := indexedSrcMask // only vector indexed instructions use it
    x.indexedSplitOffset := indexedSplitOffset
    x.byteMask := GenUopByteMask(flowMask, Cat("b0".U, alignedType))(VLENB - 1, 0)
    x.fof := isUnitStride(s0_mop) && us_fof(s0_fuOpType)
    x.baseAddr := io.in.bits.src_rs1
    x.stride := io.in.bits.src_stride
    x.flowNum := flowNum
    x.nfields := s0_nfield
    x.vm := s0_vm
    x.usWholeReg := isUsWholeReg
    x.usMaskReg := isMaskReg
    x.eew := s0_eew
    x.sew := s0_sew
    x.emul := s0_emul
    x.lmul := s0_lmul
    x.vlmax := Mux(isUsWholeReg, evl, vlmax)
    x.instType := instType
    x.data := io.in.bits.src_vs3
    x.vdIdxInField := vdIdxInField
    x.preIsSplit := s0_preIsSplit
    x.alignedType := broadenAlignedType
  }
  s0_valid := io.in.valid && !s0_kill

  /**-------------------------------------
   * s1 stage
   * ------------------------------------
   * generate UopOffset
   */
  val s1_valid = RegInit(false.B)
  val s1_kill = Wire(Bool())
  val s1_in = Wire(new VLSBundle(isVStore))
  val s1_can_go = io.out.ready && io.toMergeBuffer.resp.valid
  val s1_fire = s1_valid && !s1_kill && s1_can_go

  s1_ready := s1_kill || !s1_valid || io.out.ready && io.toMergeBuffer.resp.valid

  when(s0_fire){
    s1_valid := true.B
  }.elsewhen(s1_fire){
    s1_valid := false.B
  }.elsewhen(s1_kill){
    s1_valid := false.B
  }
  s1_in := RegEnable(s0_out, s0_fire)

  val s1_flowNum = s1_in.flowNum
  val s1_uopidx = s1_in.uop.vpu.vuopIdx
  val s1_nf = s1_in.uop.vpu.nf
  val s1_nfields = s1_in.nfields
  val s1_eew = s1_in.eew
  val s1_emul = s1_in.emul
  val s1_lmul = s1_in.lmul
  val s1_instType = s1_in.instType
  val s1_stride = s1_in.stride
  val s1_vmask = FillInterleaved(8, s1_in.byteMask)(VLEN - 1, 0)
  val s1_alignedType = s1_in.alignedType
  val s1_isSpecialIndexed = isIndexed(s1_instType) && s1_emul.asSInt > s1_lmul.asSInt
  val s1_mask = Mux(s1_isSpecialIndexed, s1_in.indexedSrcMask, s1_in.flowMask)
  val s1_vdIdx = s1_in.vdIdxInField
  val s1_notIndexedStride = Mux( // stride for strided/unit-stride instructions
    isStrided(s1_instType),
    s1_stride(XLEN - 1, 0), // for strided loads, stride = x[rs2]
    s1_nfields << s1_eew(1, 0) // for unit-stride loads, stride = eew * NFIELDS
  )

  val stride = Mux(isIndexed(s1_instType), s1_stride, s1_notIndexedStride).asUInt // for indexed instructions, the index is fetched at split time
  val uopOffset = genVUopOffset(s1_instType, s1_uopidx, s1_nf, s1_eew(1, 0), stride, s1_alignedType)

  s1_kill := s1_in.uop.robIdx.needFlush(io.redirect)
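  // Worked example (illustrative): for the unit-stride segment load vlseg3e16.v,
  // nfields = 3 and eew = e16, so consecutive elements of the same field are
  // NFIELDS * 2 = 6 bytes apart: stride = s1_nfields << s1_eew(1, 0) = 3 << 1 = 6.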
  // query mergeBuffer
  io.toMergeBuffer.req.valid := s1_fire // a MergeBuffer entry is allocated only when the uop can go
  io.toMergeBuffer.req.bits.flowNum := Mux(s1_in.preIsSplit, PopCount(s1_in.flowMask), s1_flowNum)
  io.toMergeBuffer.req.bits.data := s1_in.data
  io.toMergeBuffer.req.bits.uop := s1_in.uop
  io.toMergeBuffer.req.bits.mask := s1_mask
  io.toMergeBuffer.req.bits.vaddr := DontCare
  io.toMergeBuffer.req.bits.vdIdx := s1_vdIdx // TODO: vdIdxReg should no longer be useful; don't delete it for now
  io.toMergeBuffer.req.bits.fof := s1_in.fof
  io.toMergeBuffer.req.bits.vlmax := s1_in.vlmax
//  io.toMergeBuffer.req.bits.vdOffset :=

  // TODO: vdIdxReg should no longer be useful; don't delete it for now
//  when (s1_in.uop.lastUop && s1_fire || s1_kill) {
//    vdIdxReg := 0.U
//  }.elsewhen(s1_fire) {
//    vdIdxReg := vdIdxReg + 1.U
//    XSError(vdIdxReg + 1.U === 0.U, s"Overflow! The number of vd should be less than 8\n")
//  }

  // out connect
  io.out.valid := s1_valid && io.toMergeBuffer.resp.valid
  io.out.bits := s1_in
  io.out.bits.uopOffset := uopOffset
  io.out.bits.stride := stride
  io.out.bits.mBIndex := io.toMergeBuffer.resp.bits.mBIndex

  XSPerfAccumulate("split_out", io.out.fire)
  XSPerfAccumulate("pipe_block", io.out.valid && !io.out.ready)
  XSPerfAccumulate("mbuffer_block", s1_valid && io.out.ready && !io.toMergeBuffer.resp.valid)
}

abstract class VSplitBuffer(isVStore: Boolean = false)(implicit p: Parameters) extends VLSUModule {
  val io = IO(new VSplitBufferIO(isVStore))

  val bufferSize: Int

  class VSplitPtr(implicit p: Parameters) extends CircularQueuePtr[VSplitPtr](bufferSize) {
  }

  object VSplitPtr {
    def apply(f: Bool, v: UInt)(implicit p: Parameters): VSplitPtr = {
      val ptr = Wire(new VSplitPtr)
      ptr.flag := f
      ptr.value := v
      ptr
    }
  }

  val uopq = Reg(Vec(bufferSize, new VLSBundle(isVStore)))
  val valid = RegInit(VecInit(Seq.fill(bufferSize)(false.B)))
  val srcMaskVec = Reg(Vec(bufferSize, UInt(VLEN.W)))
  // ptr
  val enqPtr = RegInit(0.U.asTypeOf(new VSplitPtr))
  val deqPtr = RegInit(0.U.asTypeOf(new VSplitPtr))
  // for split
  val splitIdx = RegInit(0.U(flowIdxBits.W))
  val strideOffsetReg = RegInit(0.U(VLEN.W))

  /**
   * Redirect
   */
  val flushed = WireInit(VecInit(Seq.fill(bufferSize)(false.B))) // entry has been flushed by a redirect that arrived one cycle earlier
  val flushVec = (valid zip flushed).zip(uopq).map { case ((v, f), entry) => v && entry.uop.robIdx.needFlush(io.redirect) && !f }
  val flushEnq = io.in.fire && io.in.bits.uop.robIdx.needFlush(io.redirect)
  val flushNumReg = RegNext(PopCount(flushEnq +: flushVec))
  val redirectReg = RegNext(io.redirect)
  val flushVecReg = RegNext(WireInit(VecInit(flushVec)))

  // enqueue; if redirected, the entry will be flushed next cycle
  when (io.in.fire) {
    val id = enqPtr.value
    uopq(id) := io.in.bits
    valid(id) := true.B
  }
  io.in.ready := isNotBefore(enqPtr, deqPtr)

  // split uops
  val issueValid = valid(deqPtr.value)
  val issueEntry = uopq(deqPtr.value)
  val issueMbIndex = issueEntry.mBIndex
  val issueFlowNum = issueEntry.flowNum
  val issueBaseAddr = issueEntry.baseAddr
  val issueUop = issueEntry.uop
  val issueUopIdx = issueUop.vpu.vuopIdx
  val issueInstType = issueEntry.instType
  val issueUopOffset = issueEntry.uopOffset
  val issueEew = issueEntry.eew
  val issueSew = issueEntry.sew
  val issueLmul = issueEntry.lmul
  val issueEmul = issueEntry.emul
  val issueAlignedType = issueEntry.alignedType
  val issuePreIsSplit = issueEntry.preIsSplit
  val issueByteMask = issueEntry.byteMask
  val issueVLMAXMask = issueEntry.vlmax - 1.U
  val issueIsWholeReg = issueEntry.usWholeReg
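  // Note (illustrative; assumes VLEN = 128): vlmax is a power of two, so
  // issueVLMAXMask is a simple bit mask. E.g. with lmul = m2 and sew = e32,
  // vlmax = 8 and issueVLMAXMask = 0b111; elemIdx & issueVLMAXMask below gives the
  // element index within the current field, and elemIdx >> issueVLMAXLog2 selects
  // the field of a segment access.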
  val issueVLMAXLog2 = GenVLMAXLog2(issueEntry.lmul, issueSew)
  val elemIdx = GenElemIdx(
    instType = issueInstType,
    emul = issueEmul,
    lmul = issueLmul,
    eew = issueEew,
    sew = issueSew,
    uopIdx = issueUopIdx,
    flowIdx = splitIdx
  ) // element index inside the instruction, used for exceptions

  val splitIdxOffset = issueEntry.indexedSplitOffset + splitIdx

  val elemIdxInsideField = elemIdx & issueVLMAXMask
  val indexFlowInnerIdx = ((elemIdxInsideField << issueEew(1, 0))(vOffsetBits - 1, 0) >> issueEew(1, 0)).asUInt
  val nfIdx = Mux(issueIsWholeReg, 0.U, elemIdx >> issueVLMAXLog2)
  val fieldOffset = nfIdx << issueAlignedType // field offset inside a segment

  val indexedStride = IndexAddr( // index for indexed instructions
    index = issueEntry.stride,
    flow_inner_idx = indexFlowInnerIdx,
    eew = issueEew
  )
  val issueStride = Mux(isIndexed(issueInstType), indexedStride, strideOffsetReg)
  val vaddr = issueBaseAddr + issueUopOffset + issueStride
  val mask = genVWmask128(vaddr, issueAlignedType) // mask for the flow
  val flowMask = issueEntry.flowMask
  val vecActive = (flowMask & UIntToOH(splitIdx)).orR
  /*
   * A unit-stride uop is split into one or two flows:
   * if its address is 128-bit aligned it becomes one flow, otherwise two.
   */
  val usAligned128 = (vaddr(3, 0) === 0.U) // addr 128-bit aligned
  val usSplitMask = genUSSplitMask(issueByteMask, splitIdx, vaddr(3, 0))
  val usNoSplit = (usAligned128 || !(vaddr(3, 0) +& PopCount(usSplitMask))(4)) && !issuePreIsSplit && (splitIdx === 0.U) // this unit-stride uop doesn't need to be split into two flows
  val usSplitVaddr = genUSSplitAddr(vaddr, splitIdx)
  val regOffset = vaddr(3, 0) // offset in the 256-bit vd
  XSError((splitIdx > 1.U && usNoSplit) || (splitIdx > 1.U && !issuePreIsSplit), "Unit-Stride addr split error!\n")
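  // Worked example (illustrative; assumes a fully active 16-byte unit-stride uop):
  // with vaddr(3, 0) = 0xA, the first flow covers bytes 0xA..0xF, so
  // PopCount(usSplitMask) = 6 and 0xA + 6 = 16 sets bit 4: the access crosses a
  // 16-byte boundary and a second flow is issued for the remaining 10 bytes.
  // With vaddr(3, 0) = 0 the access is 128-bit aligned and usNoSplit holds.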
  // data
  io.out.bits match { case x =>
    x.uop := issueUop
    x.vaddr := Mux(!issuePreIsSplit, usSplitVaddr, vaddr)
    x.alignedType := issueAlignedType
    x.isvec := true.B
    x.mask := Mux(!issuePreIsSplit, usSplitMask, mask)
    x.reg_offset := regOffset // for merging unit-stride data
    x.vecActive := vecActive
    x.is_first_ele := DontCare
    x.usSecondInv := usNoSplit
    x.elemIdx := elemIdx
    x.elemIdxInsideVd := splitIdxOffset // for unit-stride, the index of the two split memory requests (for merging data)
    x.uop_unit_stride_fof := DontCare
    x.isFirstIssue := DontCare
    x.mBIndex := issueMbIndex
  }

  // update enqPtr
  when (redirectReg.valid && flushNumReg =/= 0.U) {
    enqPtr := enqPtr - flushNumReg
  }.otherwise {
    when (io.in.fire) {
      enqPtr := enqPtr + 1.U
    }
  }

  // flush queue
  for (i <- 0 until bufferSize) {
    when(flushVecReg(i) && redirectReg.valid && flushNumReg =/= 0.U) {
      valid(i) := false.B
      flushed(i) := true.B
    }
  }

  /* Execute logic */
  /** Issue to scalar pipeline **/
  val canIssue = Wire(Bool())
  val allowIssue = io.out.ready
  val activeIssue = Wire(Bool())
  val deqValid = valid(deqPtr.value)
  val inActiveIssue = deqValid && canIssue && !vecActive && issuePreIsSplit
  val issueCount = Mux(usNoSplit, 2.U, (PopCount(inActiveIssue) + PopCount(activeIssue))) // a no-split unit-stride uop still consumes both planned flows

  // handshake
  val thisPtr = deqPtr.value
  canIssue := !issueUop.robIdx.needFlush(io.redirect) &&
              !issueUop.robIdx.needFlush(redirectReg) &&
              deqPtr < enqPtr
  activeIssue := canIssue && allowIssue && (vecActive || !issuePreIsSplit) // unit-stride flows always issue; others only when the element is active
  when (!RegNext(io.redirect.valid) || distanceBetween(enqPtr, deqPtr) > flushNumReg) {
    when (splitIdx < (issueFlowNum - issueCount)) {
      when (activeIssue || inActiveIssue) {
        // The uop has not been entirely split yet
        splitIdx := splitIdx + issueCount
        strideOffsetReg := Mux(!issuePreIsSplit, strideOffsetReg, strideOffsetReg + issueEntry.stride) // for normal unit-stride, strideOffsetReg is not used
      }
    }.otherwise {
      when (activeIssue || inActiveIssue) {
        // The uop is done splitting
        splitIdx := 0.U(flowIdxBits.W) // initialize flowIdx
        valid(deqPtr.value) := false.B
        strideOffsetReg := 0.U
        deqPtr := deqPtr + 1.U
      }
    }
  }.otherwise {
    splitIdx := 0.U(flowIdxBits.W) // initialize flowIdx
    strideOffsetReg := 0.U
  }

  // out connect
  io.out.valid := canIssue && (vecActive || !issuePreIsSplit) // TODO: inactive uops should not be sent to the pipeline

  XSPerfAccumulate("out_valid", io.out.valid)
  XSPerfAccumulate("out_fire", io.out.fire)
  XSPerfAccumulate("out_fire_unitstride", io.out.fire && !issuePreIsSplit)
  XSPerfAccumulate("unitstride_vlenAlign", io.out.fire && !issuePreIsSplit && io.out.bits.vaddr(3, 0) === 0.U)
  XSPerfAccumulate("unitstride_invalid", io.out.ready && canIssue && !issuePreIsSplit && !PopCount(io.out.bits.mask).orR) // count unit-stride flows whose byte mask is empty

  QueuePerf(bufferSize, distanceBetween(enqPtr, deqPtr), !io.in.ready)
}
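// Note on the concrete implementations below: the store variant additionally splits
// the source data per flow (genVSData/genVWdata for element flows, genUSSplitData
// for 128-bit unit-stride flows) and forwards it to the store queue via io.vstd,
// while the load variant only assigns the per-flow lqIdx.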
class VSSplitBufferImp(implicit p: Parameters) extends VSplitBuffer(isVStore = true) {
  override lazy val bufferSize = SplitBufferSize
  // split data
  val splitData = genVSData(
    data = issueEntry.data.asUInt,
    elemIdx = splitIdxOffset,
    alignedType = issueAlignedType
  )
  val flowData = genVWdata(splitData, issueAlignedType)
  val usSplitData = genUSSplitData(issueEntry.data.asUInt, splitIdx, vaddr(3, 0))

  val sqIdx = issueUop.sqIdx + splitIdx
  io.out.bits.uop.sqIdx := sqIdx

  // send data to sq
  val vstd = io.vstd.get
  vstd.valid := canIssue
  vstd.bits.uop := issueUop
  vstd.bits.uop.sqIdx := sqIdx
  vstd.bits.data := Mux(!issuePreIsSplit, usSplitData, flowData)
  vstd.bits.debug := DontCare
  vstd.bits.vdIdx.get := DontCare
  vstd.bits.vdIdxInField.get := DontCare
  vstd.bits.mask.get := Mux(!issuePreIsSplit, usSplitMask, mask)
}

class VLSplitBufferImp(implicit p: Parameters) extends VSplitBuffer(isVStore = false) {
  override lazy val bufferSize = SplitBufferSize
  io.out.bits.uop.lqIdx := issueUop.lqIdx + splitIdx
}

class VSSplitPipelineImp(implicit p: Parameters) extends VSplitPipeline(isVStore = true) {
  override def us_whole_reg(fuOpType: UInt): Bool = fuOpType === VstuType.vsr
  override def us_mask(fuOpType: UInt): Bool = fuOpType === VstuType.vsm
  override def us_fof(fuOpType: UInt): Bool = false.B // there is no vector fof store
}

class VLSplitPipelineImp(implicit p: Parameters) extends VSplitPipeline(isVStore = false) {
  override def us_whole_reg(fuOpType: UInt): Bool = fuOpType === VlduType.vlr
  override def us_mask(fuOpType: UInt): Bool = fuOpType === VlduType.vlm
  override def us_fof(fuOpType: UInt): Bool = fuOpType === VlduType.vleff
}

class VLSplitImp(implicit p: Parameters) extends VLSUModule {
  val io = IO(new VSplitIO(isVStore = false))
  val splitPipeline = Module(new VLSplitPipelineImp())
  val splitBuffer = Module(new VLSplitBufferImp())
  // Split Pipeline
  splitPipeline.io.in <> io.in
  splitPipeline.io.redirect <> io.redirect
  io.toMergeBuffer <> splitPipeline.io.toMergeBuffer

  // Split Buffer
  splitBuffer.io.in <> splitPipeline.io.out
  splitBuffer.io.redirect <> io.redirect
  io.out <> splitBuffer.io.out
}

class VSSplitImp(implicit p: Parameters) extends VLSUModule {
  val io = IO(new VSplitIO(isVStore = true))
  val splitPipeline = Module(new VSSplitPipelineImp())
  val splitBuffer = Module(new VSSplitBufferImp())
  // Split Pipeline
  splitPipeline.io.in <> io.in
  splitPipeline.io.redirect <> io.redirect
  io.toMergeBuffer <> splitPipeline.io.toMergeBuffer

  // Split Buffer
  splitBuffer.io.in <> splitPipeline.io.out
  splitBuffer.io.redirect <> io.redirect
  io.out <> splitBuffer.io.out
  io.vstd.get <> splitBuffer.io.vstd.get
}
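// Illustrative wiring sketch (hypothetical parent module; 'vlsUopQueue',
// 'vlMergeBuffer' and 'loadUnit' are assumed names, not defined in this file):
//   val vlSplit = Module(new VLSplitImp())
//   vlSplit.io.redirect <> io.redirect
//   vlSplit.io.in <> vlsUopQueue.io.out             // uop source
//   vlSplit.io.toMergeBuffer <> vlMergeBuffer.io.fromSplit
//   loadUnit.io.vecldin <> vlSplit.io.out           // per-flow consumer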