/***************************************************************************************
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan._
import xiangshan.backend.rob.RobPtr
import xiangshan.backend.Bundles._
import xiangshan.mem._
import xiangshan.backend.fu.vector.Bundles._
import xiangshan.backend.fu.FuConfig._


class VSplitPipeline(isVStore: Boolean = false)(implicit p: Parameters) extends VLSUModule {
  val io = IO(new VSplitPipelineIO(isVStore))
  // overridden by the load/store implementations at the bottom of this file
  def us_whole_reg(fuOpType: UInt): Bool = false.B
  def us_mask(fuOpType: UInt): Bool = false.B
  def us_fof(fuOpType: UInt): Bool = false.B
  // TODO: vdIdxReg should no longer be useful; don't delete it for now
  val vdIdxReg = RegInit(0.U(3.W))

  val s1_ready = WireInit(false.B)
  io.in.ready := s1_ready

  /**-----------------------------------------------------------
    * s0 stage
    * decode and generate alignedType, uop mask, preIsSplit
    * ----------------------------------------------------------
    */
  val s0_uop = io.in.bits.uop
  val s0_vtype = s0_uop.vpu.vtype
  val s0_sew = s0_vtype.vsew
  val s0_eew = s0_uop.vpu.veew
  val s0_lmul = s0_vtype.vlmul
  // when loading a whole register or a unit-stride mask, emul should be 1
  val s0_fuOpType = s0_uop.fuOpType
  val s0_mop = s0_fuOpType(6, 5)
  val s0_nf = Mux(us_whole_reg(s0_fuOpType), 0.U, s0_uop.vpu.nf)
  val s0_vm = s0_uop.vpu.vm
  val s0_emul = Mux(us_whole_reg(s0_fuOpType), GenUSWholeEmul(s0_uop.vpu.nf), Mux(us_mask(s0_fuOpType), 0.U(mulBits.W), EewLog2(s0_eew) - s0_sew + s0_lmul))
  val s0_preIsSplit = !(isUnitStride(s0_mop) && !us_fof(s0_fuOpType))
  val s0_nfield = s0_nf +& 1.U

  val s0_valid = Wire(Bool())
  val s0_kill = io.in.bits.uop.robIdx.needFlush(io.redirect)
  val s0_can_go = s1_ready
  val s0_fire = s0_valid && s0_can_go
  val s0_out = Wire(new VLSBundle(isVStore))

  val isUsWholeReg = isUnitStride(s0_mop) && us_whole_reg(s0_fuOpType)
  val isMaskReg = isUnitStride(s0_mop) && us_mask(s0_fuOpType)
  val isSegment = s0_nf =/= 0.U && !us_whole_reg(s0_fuOpType)
  val instType = Cat(isSegment, s0_mop)
  val uopIdx = io.in.bits.uop.vpu.vuopIdx
  val uopIdxInField = GenUopIdxInField(instType, s0_emul, s0_lmul, uopIdx)
  val vdIdxInField = GenVdIdxInField(instType, s0_emul, s0_lmul, uopIdxInField)
  val lmulLog2 = Mux(s0_lmul.asSInt >= 0.S, 0.U, s0_lmul)
  val emulLog2 = Mux(s0_emul.asSInt >= 0.S, 0.U, s0_emul)
  val numEewLog2 = emulLog2 - EewLog2(s0_eew)
  val numSewLog2 = lmulLog2 - s0_sew
  val numFlowsSameVdLog2 = Mux(
    isIndexed(instType),
    log2Up(VLENB).U - s0_sew(1, 0),
    log2Up(VLENB).U - s0_eew(1, 0)
  )
  // numUops = nf * max(lmul, emul)
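  // Worked example (illustrative values, assuming VLEN = 128):
  //   vsetvli    t1, t0, e8, m1   =>  lmul = 1 (lmulLog2Pos = 0)
  //   vluxei16.v v2, (a0), v8     =>  index eew = 16, emul = 2 * lmul = 2 (emulLog2Pos = 1)
  // With nf = 0 (one field) and lmul < emul, numUops = 1 << emulLog2Pos = 2:
  // two index uops (v8, v9) feed the single data register group.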
  val lmulLog2Pos = Mux(s0_lmul.asSInt < 0.S, 0.U, s0_lmul)
  val emulLog2Pos = Mux(s0_emul.asSInt < 0.S, 0.U, s0_emul)
  val numUops = Mux(
    isIndexed(s0_mop) && s0_lmul.asSInt > s0_emul.asSInt,
    (s0_nf +& 1.U) << lmulLog2Pos,
    (s0_nf +& 1.U) << emulLog2Pos
  )

  val vvl = io.in.bits.src_vl.asTypeOf(VConfig()).vl
  val evl = Mux(isUsWholeReg,
    GenUSWholeRegVL(io.in.bits.uop.vpu.nf +& 1.U, s0_eew),
    Mux(isMaskReg,
      GenUSMaskRegVL(vvl),
      vvl))
  val vvstart = io.in.bits.uop.vpu.vstart
  val alignedType = Mux(isIndexed(instType), s0_sew(1, 0), s0_eew(1, 0))
  val broadenAlignedType = Mux(s0_preIsSplit, Cat("b0".U, alignedType), "b100".U) // if unit-stride, use 128-bit memory accesses
  val flowsLog2 = GenRealFlowLog2(instType, s0_emul, s0_lmul, s0_eew, s0_sew)
  val flowsPrevThisUop = (uopIdxInField << flowsLog2).asUInt // # of flows before this uop in a field
  val flowsPrevThisVd = (vdIdxInField << numFlowsSameVdLog2).asUInt // # of flows before this vd in a field
  val flowsIncludeThisUop = ((uopIdxInField +& 1.U) << flowsLog2).asUInt // # of flows up to and including this uop
  val flowNum = io.in.bits.flowNum.get
  // max index in vd; only used by indexed instructions to calculate the index
  val maxIdxInVdIndex = GenVLMAX(Mux(s0_emul.asSInt > 0.S, 0.U, s0_emul), s0_eew(1, 0))
  val indexVlMaxInVd = GenVlMaxMask(maxIdxInVdIndex, elemIdxBits)

  // For vector indexed instructions:
  // When emul is greater than lmul, multiple uops correspond to one vd, e.g.:
  //   vsetvli    t1, t0, e8, m1, ta, ma    lmul = 1
  //   vluxei16.v v2, (a0), v8              emul = 2
  // In this case the flow mask must be right-shifted by flowsPrevThisUop, while the
  // mask passed to the merge buffer is right-shifted by flowsPrevThisVd, e.g.:
  //   vl = 9
  //   srcMask = 0x1FF
  //   uopIdxInField = 0 and vdIdxInField = 0, flowMask = 0x00FF, toMergeBuffMask = 0x01FF
  //   uopIdxInField = 1 and vdIdxInField = 0, flowMask = 0x0001, toMergeBuffMask = 0x01FF
  //   uopIdxInField = 2 and vdIdxInField = 1, flowMask = 0x0000, toMergeBuffMask = 0x0000
  //   uopIdxInField = 3 and vdIdxInField = 1, flowMask = 0x0000, toMergeBuffMask = 0x0000
  val isSpecialIndexed = isIndexed(instType) && s0_emul.asSInt > s0_lmul.asSInt

  val srcMask = GenFlowMask(Mux(s0_vm, Fill(VLEN, 1.U(1.W)), io.in.bits.src_mask), vvstart, evl, true)
  val srcMaskShiftBits = Mux(isSpecialIndexed, flowsPrevThisUop, flowsPrevThisVd)

  val flowMask = ((srcMask &
    UIntToMask(flowsIncludeThisUop.asUInt, VLEN + 1) &
    (~UIntToMask(flowsPrevThisUop.asUInt, VLEN)).asUInt
  ) >> srcMaskShiftBits)(VLENB - 1, 0)
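  // Step-by-step sketch of flowMask for the example above (uopIdxInField = 1,
  // flowsLog2 = 3, so flowsPrevThisUop = 8 and flowsIncludeThisUop = 16):
  //   srcMask                        = 0x1FF
  //   & UIntToMask(16, VLEN + 1)     = 0x1FF   (keep flows up to this uop)
  //   & ~UIntToMask(8, VLEN)         = 0x100   (drop flows of earlier uops)
  //   >> srcMaskShiftBits (= 8)      = 0x001   => flowMask = 0x0001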
  val indexedSrcMask = (srcMask >> flowsPrevThisVd).asUInt // only for indexed instructions

  // Used to calculate the element index.
  // See 'splitbuffer' for 'io.out.splitIdxOffset' and 'mergebuffer' for 'merge data'.
  val indexedSplitOffset = Mux(isSpecialIndexed, flowsPrevThisUop - flowsPrevThisVd, 0.U) // only for indexed instructions with emul > lmul
  val vlmax = GenVLMAX(s0_lmul, s0_sew)

  // connect
  s0_out := DontCare
  s0_out match { case x =>
    x.uop := io.in.bits.uop
    x.uop.vpu.vl := evl
    x.uop.uopIdx := uopIdx
    x.uop.numUops := numUops
    x.uop.lastUop := (uopIdx +& 1.U) === numUops
    x.uop.vpu.nf := s0_nf
    x.flowMask := flowMask
    x.indexedSrcMask := indexedSrcMask // only vector indexed instructions use it
    x.indexedSplitOffset := indexedSplitOffset
    x.byteMask := GenUopByteMask(flowMask, Cat("b0".U, alignedType))(VLENB - 1, 0)
    x.fof := isUnitStride(s0_mop) && us_fof(s0_fuOpType)
    x.baseAddr := io.in.bits.src_rs1
    x.stride := io.in.bits.src_stride
    x.flowNum := flowNum
    x.nfields := s0_nfield
    x.vm := s0_vm
    x.usWholeReg := isUsWholeReg
    x.usMaskReg := isMaskReg
    x.eew := s0_eew
    x.sew := s0_sew
    x.emul := s0_emul
    x.lmul := s0_lmul
    x.vlmax := Mux(isUsWholeReg, evl, vlmax)
    x.instType := instType
    x.data := io.in.bits.src_vs3
    x.vdIdxInField := vdIdxInField
    x.preIsSplit := s0_preIsSplit
    x.alignedType := broadenAlignedType
    x.indexVlMaxInVd := indexVlMaxInVd
  }
  s0_valid := io.in.valid && !s0_kill

  /**-------------------------------------
    * s1 stage
    * ------------------------------------
    * generate uopOffset
    */
  val s1_valid = RegInit(false.B)
  val s1_kill = Wire(Bool())
  val s1_in = Wire(new VLSBundle(isVStore))
  val s1_can_go = io.out.ready && io.toMergeBuffer.req.ready
  val s1_fire = s1_valid && !s1_kill && s1_can_go

  s1_ready := s1_kill || !s1_valid || s1_can_go

  when(s0_fire) {
    s1_valid := true.B
  }.elsewhen(s1_fire) {
    s1_valid := false.B
  }.elsewhen(s1_kill) {
    s1_valid := false.B
  }
  s1_in := RegEnable(s0_out, s0_fire)

  val s1_flowNum = s1_in.flowNum
  val s1_uop = s1_in.uop
  val s1_uopidx = s1_uop.vpu.vuopIdx
  val s1_nf = s1_uop.vpu.nf
  val s1_nfields = s1_in.nfields
  val s1_eew = s1_in.eew
  val s1_emul = s1_in.emul
  val s1_lmul = s1_in.lmul
  val s1_instType = s1_in.instType
  val s1_stride = s1_in.stride
  val s1_vmask = FillInterleaved(8, s1_in.byteMask)(VLEN - 1, 0)
  val s1_alignedType = s1_in.alignedType
  val s1_isSpecialIndexed = isIndexed(s1_instType) && s1_emul.asSInt > s1_lmul.asSInt
  val s1_mask = Mux(s1_isSpecialIndexed, s1_in.indexedSrcMask, s1_in.flowMask)
  val s1_vdIdx = s1_in.vdIdxInField
  val s1_fof = s1_in.fof
  val s1_notIndexedStride = Mux( // stride for strided/unit-stride instructions
    isStrided(s1_instType),
    s1_stride(XLEN - 1, 0), // for strided load, stride = x[rs2]
    s1_nfields << s1_eew(1, 0) // for unit-stride load, stride = eew * NFIELDS
  )

  val stride = Mux(isIndexed(s1_instType), s1_stride, s1_notIndexedStride).asUInt // for indexed instructions, the index is applied at split time
  val uopOffset = genVUopOffset(s1_instType, s1_fof, s1_uopidx, s1_nf, s1_eew(1, 0), stride, s1_alignedType)
  val activeNum = Mux(s1_in.preIsSplit, PopCount(s1_in.flowMask), s1_flowNum)
  // for unit-stride: if the uop's address is 128-bit aligned, split it into one flow, otherwise two
  val usLowBitsAddr = getCheckAddrLowBits(s1_in.baseAddr, maxMemByteNum) + getCheckAddrLowBits(uopOffset, maxMemByteNum)
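  // Illustrative example (assuming maxMemByteNum = 16, i.e. 128-bit accesses):
  // baseAddr = 0x8000_0008 and uopOffset = 0x8 give usLowBitsAddr = 0x8 + 0x8 = 0x10,
  // whose low 4 bits are 0, so the access is 128-bit aligned and needs only one flow.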
  val usAligned128 = (getCheckAddrLowBits(usLowBitsAddr, maxMemByteNum) === 0.U) // address is 128-bit aligned

  s1_kill := s1_in.uop.robIdx.needFlush(io.redirect)

  // query mergeBuffer
  io.toMergeBuffer.req.valid := io.out.ready && s1_valid // only request a MergeBuffer entry when the uop can go
  io.toMergeBuffer.req.bits.flowNum := activeNum
  io.toMergeBuffer.req.bits.data := s1_in.data
  io.toMergeBuffer.req.bits.uop := s1_in.uop
  io.toMergeBuffer.req.bits.mask := s1_mask
  io.toMergeBuffer.req.bits.vaddr := DontCare
  io.toMergeBuffer.req.bits.vdIdx := s1_vdIdx // TODO: vdIdxReg should no longer be useful; don't delete it for now
  io.toMergeBuffer.req.bits.fof := s1_in.fof
  io.toMergeBuffer.req.bits.vlmax := s1_in.vlmax
//  io.toMergeBuffer.req.bits.vdOffset :=

  // TODO: vdIdxReg should no longer be useful; don't delete it for now
//  when (s1_in.uop.lastUop && s1_fire || s1_kill) {
//    vdIdxReg := 0.U
//  }.elsewhen(s1_fire) {
//    vdIdxReg := vdIdxReg + 1.U
//    XSError(vdIdxReg + 1.U === 0.U, s"Overflow! The number of vd should be less than 8\n")
//  }
  // out connect
  io.out.valid := s1_valid && io.toMergeBuffer.resp.valid && (activeNum =/= 0.U) // if activeNum == 0, this uop does nothing and can be killed
  io.out.bits := s1_in
  io.out.bits.uopOffset := uopOffset
  io.out.bits.stride := stride
  io.out.bits.mBIndex := io.toMergeBuffer.resp.bits.mBIndex
  io.out.bits.usLowBitsAddr := usLowBitsAddr
  io.out.bits.usAligned128 := usAligned128

  XSPerfAccumulate("split_out", io.out.fire)
  XSPerfAccumulate("pipe_block", io.out.valid && !io.out.ready)
  XSPerfAccumulate("mbuffer_block", s1_valid && io.out.ready && !io.toMergeBuffer.resp.valid)
}

abstract class VSplitBuffer(isVStore: Boolean = false)(implicit p: Parameters) extends VLSUModule {
  val io = IO(new VSplitBufferIO(isVStore))
  lazy val fuCfg = if (isVStore) VstuCfg else VlduCfg

  val uopq = Reg(new VLSBundle(isVStore))
  val allocated = RegInit(false.B)
  val needCancel = WireInit(false.B)
  val activeIssue = Wire(Bool())
  val inActiveIssue = Wire(Bool())
  val splitFinish = WireInit(false.B)

  // for split
  val splitIdx = RegInit(0.U(flowIdxBits.W))
  val strideOffsetReg = RegInit(0.U(VLEN.W))

  /**
    * Redirect
    */
  val cancelEnq = io.in.bits.uop.robIdx.needFlush(io.redirect)
  val canEnqueue = io.in.valid
  val needEnqueue = canEnqueue && !cancelEnq

  // enqueue
  val offset = PopCount(needEnqueue)
  val canAccept = !allocated || allocated && splitFinish && (activeIssue || inActiveIssue) // an allocated entry can only be replaced after its split finishes and the last flow issues
  io.in.ready := canAccept
  val doEnqueue = canAccept && needEnqueue

  when(doEnqueue) {
    uopq := io.in.bits
  }

  // split uops
  val issueValid = allocated && !needCancel
  val issueEntry = uopq
  val issueMbIndex = issueEntry.mBIndex
  val issueFlowNum = issueEntry.flowNum
  val issueBaseAddr = issueEntry.baseAddr
  val issueUop = issueEntry.uop
  val issueUopIdx = issueUop.vpu.vuopIdx
  val issueInstType = issueEntry.instType
  val issueUopOffset = issueEntry.uopOffset
  val issueEew = issueEntry.eew
  val issueSew = issueEntry.sew
  val issueLmul = issueEntry.lmul
  val issueEmul = issueEntry.emul
  val issueAlignedType = issueEntry.alignedType
  val issuePreIsSplit = issueEntry.preIsSplit
  val issueByteMask = issueEntry.byteMask
  val issueVLMAXMask = issueEntry.vlmax - 1.U
  val issueIsWholeReg = issueEntry.usWholeReg
  val issueVLMAXLog2 = GenVLMAXLog2(issueEntry.lmul, issueSew)
  val issueVlMaxInVd = issueEntry.indexVlMaxInVd
  val issueUsLowBitsAddr = issueEntry.usLowBitsAddr
  val issueUsAligned128 = issueEntry.usAligned128
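  // Hedged reading of GenElemIdx (illustrative, non-indexed case): each uop covers
  // VLEN >> eew flows, so with eew = 8, VLEN = 128 and lmul = 2, uopIdx = 1 and
  // splitIdx = 3 would give elemIdx = 1 * 16 + 3 = 19, the element's index within
  // the whole instruction.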
  val elemIdx = GenElemIdx(
    instType = issueInstType,
    emul = issueEmul,
    lmul = issueLmul,
    eew = issueEew,
    sew = issueSew,
    uopIdx = issueUopIdx,
    flowIdx = splitIdx
  ) // element index inside the instruction, used for exceptions

  val splitIdxOffset = issueEntry.indexedSplitOffset + splitIdx

  val indexFlowInnerIdx = elemIdx & issueVlMaxInVd
  val nfIdx = Mux(issueIsWholeReg, 0.U, elemIdx >> issueVLMAXLog2)
  val fieldOffset = nfIdx << issueAlignedType // field offset inside a segment

  val indexedStride = IndexAddr( // index for indexed instructions
    index = issueEntry.stride,
    flow_inner_idx = indexFlowInnerIdx,
    eew = issueEew
  )
  val issueStride = Mux(isIndexed(issueInstType), indexedStride, strideOffsetReg)
  val vaddr = issueBaseAddr + issueUopOffset + issueStride
  val mask = genVWmask128(vaddr, issueAlignedType) // scalar mask for the flow
  val flowMask = issueEntry.flowMask
  val vecActive = (flowMask & UIntToOH(splitIdx)).orR
  /*
   * Unit-stride is split into one flow or two:
   * if the uop's address is 128-bit aligned, split it into one flow, otherwise two.
   */
  val usSplitMask = genUSSplitMask(issueByteMask, splitIdx, getCheckAddrLowBits(issueUsLowBitsAddr, maxMemByteNum))
  val usNoSplit = (issueUsAligned128 || !getOverflowBit(getCheckAddrLowBits(issueUsLowBitsAddr, maxMemByteNum) +& PopCount(usSplitMask), maxMemByteNum)) &&
    !issuePreIsSplit &&
    (splitIdx === 0.U) // this unit-stride uop does not need to be split into two flows
  val usSplitVaddr = genUSSplitAddr(vaddr, splitIdx)
  val regOffset = getCheckAddrLowBits(issueUsLowBitsAddr, maxMemByteNum) // offset in the 256-bit vd
  XSError((splitIdx > 1.U && usNoSplit) || (splitIdx > 1.U && !issuePreIsSplit), "Unit-Stride addr split error!\n")

  // data
  io.out.bits match { case x =>
    x.uop := issueUop
    x.uop.exceptionVec := ExceptionNO.selectByFu(issueUop.exceptionVec, fuCfg)
    x.vaddr := Mux(!issuePreIsSplit, usSplitVaddr, vaddr)
    x.alignedType := issueAlignedType
    x.isvec := true.B
    x.mask := Mux(!issuePreIsSplit, usSplitMask, mask)
    x.reg_offset := regOffset // for merging unit-stride data
    x.vecActive := Mux(!issuePreIsSplit, true.B, vecActive) // currently, unit-stride flows are always sent to the pipeline
    x.is_first_ele := DontCare
    x.usSecondInv := usNoSplit
    x.elemIdx := elemIdx
    x.elemIdxInsideVd := splitIdxOffset // for unit-stride, this is the index of the two split memory requests (for merging data)
    x.uop_unit_stride_fof := DontCare
    x.isFirstIssue := DontCare
    x.mBIndex := issueMbIndex
  }

  // redirect
  needCancel := uopq.uop.robIdx.needFlush(io.redirect) && allocated

  /* Execute logic */
  /** Issue to scalar pipeline **/
  val allowIssue = io.out.ready
  val issueCount = Mux(usNoSplit, 2.U, (PopCount(inActiveIssue) + PopCount(activeIssue))) // a unit-stride uop that needs no split still counts as two flows
  splitFinish := splitIdx >= (issueFlowNum - issueCount)

  // handshake
  activeIssue := issueValid && allowIssue && (vecActive || !issuePreIsSplit) // active issue; currently used for non-unit-stride
  inActiveIssue := issueValid && !vecActive && issuePreIsSplit
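  // Split progress sketch (illustrative): a fully-active uop with issueFlowNum = 4
  // fires four times, advancing splitIdx 0 -> 1 -> 2 -> 3; on the last fire
  // splitIdx >= issueFlowNum - issueCount holds, so splitFinish frees the entry.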
  when(!issueEntry.uop.robIdx.needFlush(io.redirect)) {
    when(!splitFinish) {
      when(activeIssue || inActiveIssue) {
        // The uop has not been entirely split yet
        splitIdx := splitIdx + issueCount
        strideOffsetReg := Mux(!issuePreIsSplit, strideOffsetReg, strideOffsetReg + issueEntry.stride) // normal unit-stride does not use strideOffsetReg
      }
    }.otherwise {
      when(activeIssue || inActiveIssue) {
        // The uop is done splitting
        splitIdx := 0.U(flowIdxBits.W) // initialize flowIdx
        strideOffsetReg := 0.U
      }
    }
  }.otherwise {
    splitIdx := 0.U(flowIdxBits.W) // initialize flowIdx
    strideOffsetReg := 0.U
  }
  // allocated
  when(doEnqueue) { // doEnqueue is false if the enqueue is cancelled, so this branch has the highest priority
    allocated := true.B
  }.elsewhen(needCancel) { // redirect
    allocated := false.B
  }.elsewhen(splitFinish && (activeIssue || inActiveIssue)) { // dequeue
    allocated := false.B
  }

  // out connect
  io.out.valid := issueValid && (vecActive || !issuePreIsSplit) // TODO: do not send inactive unit-stride uops to the pipeline

  XSPerfAccumulate("out_valid", io.out.valid)
  XSPerfAccumulate("out_fire", io.out.fire)
  XSPerfAccumulate("out_fire_unitstride", io.out.fire && !issuePreIsSplit)
  XSPerfAccumulate("unitstride_vlenAlign", io.out.fire && !issuePreIsSplit && getCheckAddrLowBits(io.out.bits.vaddr, maxMemByteNum) === 0.U)
  XSPerfAccumulate("unitstride_invalid", io.out.ready && issueValid && !issuePreIsSplit && !io.out.bits.mask.orR)
}

class VSSplitBufferImp(implicit p: Parameters) extends VSplitBuffer(isVStore = true) {
  // split data
  val splitData = genVSData(
    data = issueEntry.data.asUInt,
    elemIdx = splitIdxOffset,
    alignedType = issueAlignedType
  )
  val flowData = genVWdata(splitData, issueAlignedType)
  val usSplitData = genUSSplitData(issueEntry.data.asUInt, splitIdx, vaddr(3, 0))

  val sqIdx = issueUop.sqIdx + splitIdx
  io.out.bits.uop.sqIdx := sqIdx

  // send data to sq
  val vstd = io.vstd.get
  vstd.valid := issueValid && (vecActive || !issuePreIsSplit)
  vstd.bits.uop := issueUop
  vstd.bits.uop.sqIdx := sqIdx
  vstd.bits.data := Mux(!issuePreIsSplit, usSplitData, flowData)
  vstd.bits.debug := DontCare
  vstd.bits.vdIdx.get := DontCare
  vstd.bits.vdIdxInField.get := DontCare
  vstd.bits.mask.get := Mux(!issuePreIsSplit, usSplitMask, mask)
}
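// Note on the unit-stride fast path above: genUSSplitData is given vaddr(3, 0), so
// the store data appears to be realigned to its 16-byte memory boundary before it
// is written to the store queue (an observation from the call site, not a spec).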
class VLSplitBufferImp(implicit p: Parameters) extends VSplitBuffer(isVStore = false) {
  io.out.bits.uop.lqIdx := issueUop.lqIdx + splitIdx
}

class VSSplitPipelineImp(implicit p: Parameters) extends VSplitPipeline(isVStore = true) {
  override def us_whole_reg(fuOpType: UInt): Bool = fuOpType === VstuType.vsr
  override def us_mask(fuOpType: UInt): Bool = fuOpType === VstuType.vsm
  override def us_fof(fuOpType: UInt): Bool = false.B // there is no vector fof store
}

class VLSplitPipelineImp(implicit p: Parameters) extends VSplitPipeline(isVStore = false) {
  override def us_whole_reg(fuOpType: UInt): Bool = fuOpType === VlduType.vlr
  override def us_mask(fuOpType: UInt): Bool = fuOpType === VlduType.vlm
  override def us_fof(fuOpType: UInt): Bool = fuOpType === VlduType.vleff
}

class VLSplitImp(implicit p: Parameters) extends VLSUModule {
  val io = IO(new VSplitIO(isVStore = false))
  val splitPipeline = Module(new VLSplitPipelineImp())
  val splitBuffer = Module(new VLSplitBufferImp())
  // Split Pipeline
  splitPipeline.io.in <> io.in
  splitPipeline.io.redirect <> io.redirect
  io.toMergeBuffer <> splitPipeline.io.toMergeBuffer

  // Split Buffer
  splitBuffer.io.in <> splitPipeline.io.out
  splitBuffer.io.redirect <> io.redirect
  io.out <> splitBuffer.io.out
}

class VSSplitImp(implicit p: Parameters) extends VLSUModule {
  val io = IO(new VSplitIO(isVStore = true))
  val splitPipeline = Module(new VSSplitPipelineImp())
  val splitBuffer = Module(new VSSplitBufferImp())
  // Split Pipeline
  splitPipeline.io.in <> io.in
  splitPipeline.io.redirect <> io.redirect
  io.toMergeBuffer <> splitPipeline.io.toMergeBuffer

  // Split Buffer
  splitBuffer.io.in <> splitPipeline.io.out
  splitBuffer.io.redirect <> io.redirect
  io.out <> splitBuffer.io.out
  io.vstd.get <> splitBuffer.io.vstd.get
}
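// Hedged usage sketch (hypothetical parent-side names, not part of this file): a
// memory block would sit VSSplitImp between vector issue and the store pipeline,
// roughly:
//   val vsSplit = Module(new VSSplitImp)
//   vsSplit.io.redirect <> redirect                          // flush from backend
//   vsSplit.io.in <> vsIssuePort                             // uops from vector issue
//   vsMergeBuffer.io.fromSplit <> vsSplit.io.toMergeBuffer   // entry allocation
//   storeUnit.io.vecstin <> vsSplit.io.out                   // split flows
//   storeQueue.io.vstd <> vsSplit.io.vstd.get                // split store data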