/***************************************************************************************
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 * http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan._
import xiangshan.backend.rob.RobPtr
import xiangshan.backend.Bundles._
import xiangshan.ExceptionNO._
import xiangshan.mem._
import xiangshan.backend.fu.FuType
import xiangshan.backend.fu.FuConfig._
import xiangshan.backend.datapath.NewPipelineConnect
import freechips.rocketchip.diplomacy.BufferParams
import xiangshan.backend.fu.vector.Bundles.VType

/**
  * One merge-buffer entry. It accumulates the per-flow writebacks of a single
  * vector load/store uop (counted down via `flowNum`) and carries everything
  * needed later for uop writeback, LSQ feedback and exception reporting.
  */
class MBufferBundle(implicit p: Parameters) extends VLSUBundle{
  val data = UInt(VLEN.W)           // merged result data (written by the load-side data-merge logic)
  val mask = UInt(VLENB.W)          // byte mask of the uop
  val flowNum = UInt(flowIdxBits.W) // outstanding flow count; entry is complete when it reaches 0
  val exceptionVec = ExceptionVec()
  val uop = new DynInst
  // val vdOffset = UInt(vOffsetBits.W)
  val sourceType = VSFQFeedbackType() // OR-accumulated feedback source type from the pipelines
  val flushState = Bool()
  val vdIdx = UInt(3.W)
  val elemIdx = UInt(elemIdxBits.W) // element index of the currently-recorded (oldest) exception
  // for exception
  val vstart = UInt(elemIdxBits.W)
  val vl = UInt(elemIdxBits.W)
  val vaNeedExt = Bool()
  val vaddr = UInt(XLEN.W)
  val gpaddr = UInt(GPAddrBits.W)
  val isForVSnonLeafPTE= Bool()
  val fof = Bool()                  // fault-only-first load (affects how exceptions update vl/vstart)
  val vlmax = UInt(elemIdxBits.W)

  // True once every flow of this uop has written back.
  def allReady(): Bool = (flowNum === 0.U)
}

/**
  * Merge buffer shared by vector loads and stores: collects split flows coming
  * back from the memory pipelines, merges their data/exceptions per uop, and
  * writes the finished uop back to the backend, the LSQ and the RS feedback port.
  *
  * Concrete subclasses supply `freeList`/`uopSize` (load vs. store sizing).
  */
abstract class BaseVMergeBuffer(isVStore: Boolean=false)(implicit p: Parameters) extends VLSUModule{
  val io = IO(new VMergeBufferIO(isVStore))

  // freelist: store valid entries index.
  // +---+---+--------------+-----+-----+
  // | 0 | 1 | ...... | n-2 | n-1 |
  // +---+---+--------------+-----+-----+
  val freeList: FreeList
  val uopSize: Int
  val enqWidth = io.fromSplit.length
  val deqWidth = io.uopWriteback.length
  val pipeWidth = io.fromPipeline.length
  lazy val fuCfg = if (isVStore) VstuCfg else VlduCfg

  /**
    * Initialize a freshly-allocated entry from the split unit's request.
    * NOTE(review): vaNeedExt/gpaddr/isForVSnonLeafPTE are NOT initialized here;
    * `entries` is a Reg, so those fields hold stale values until the exception
    * path below overwrites them — confirm downstream only reads them when an
    * exception was recorded.
    */
  def EnqConnect(source: MergeBufferReq, sink: MBufferBundle) = {
    sink.data := source.data
    sink.mask := source.mask
    sink.flowNum := source.flowNum
    sink.exceptionVec := ExceptionNO.selectByFu(0.U.asTypeOf(ExceptionVec()), fuCfg)
    sink.uop := source.uop
    sink.sourceType := 0.U.asTypeOf(VSFQFeedbackType())
    sink.flushState := false.B
    sink.vdIdx := source.vdIdx
    // All-ones sentinel: any real exception's elemIdx satisfies the
    // `entryElemIdx >= selElemIdx` oldest-exception check below.
    sink.elemIdx := Fill(elemIdxBits, 1.U)
    sink.fof := source.fof
    sink.vlmax := source.vlmax
    sink.vl := source.uop.vpu.vl
    sink.vaddr := source.vaddr
    sink.vstart := 0.U
  }

  /** Pack a finished entry into the backend writeback bundle (load flavor; overridden for stores). */
  def DeqConnect(source: MBufferBundle): MemExuOutput = {
    val sink = WireInit(0.U.asTypeOf(new MemExuOutput(isVector = true)))
    sink.data := source.data
    sink.mask.get := source.mask
    sink.uop := source.uop
    sink.uop.exceptionVec := ExceptionNO.selectByFu(source.exceptionVec, fuCfg)
    sink.uop.vpu.vmask := source.mask
    sink.debug := 0.U.asTypeOf(new DebugBundle)
    sink.vdIdxInField.get := source.vdIdx // Mgu needs to use this.
    sink.vdIdx.get := source.vdIdx
    sink.uop.vpu.vstart := source.vstart
    sink.uop.vpu.vl := source.vl
    sink
  }

  /** Pack a finished entry into the LSQ feedback bundle (commit vs. flush decided by exception presence). */
  def ToLsqConnect(source: MBufferBundle): FeedbackToLsqIO = {
    val sink = WireInit(0.U.asTypeOf(new FeedbackToLsqIO))
    val hasExp = ExceptionNO.selectByFu(source.exceptionVec, fuCfg).asUInt.orR
    sink.robidx := source.uop.robIdx
    sink.uopidx := source.uop.uopIdx
    sink.feedback(VecFeedbacks.COMMIT) := !hasExp
    sink.feedback(VecFeedbacks.FLUSH) := hasExp
    sink.feedback(VecFeedbacks.LAST) := true.B
    sink.vstart := source.vstart // TODO: if lsq need vl for fof?
    sink.vaddr := source.vaddr
    sink.vaNeedExt := source.vaNeedExt
    sink.gpaddr := source.gpaddr
    sink.isForVSnonLeafPTE := source.isForVSnonLeafPTE
    sink.vl := source.vl
    sink.exceptionVec := ExceptionNO.selectByFu(source.exceptionVec, fuCfg)
    sink
  }


  // Entry storage and per-entry state flags.
  val entries = Reg(Vec(uopSize, new MBufferBundle))
  val needCancel = WireInit(VecInit(Seq.fill(uopSize)(false.B)))
  val allocated = RegInit(VecInit(Seq.fill(uopSize)(false.B)))
  val freeMaskVec = WireInit(VecInit(Seq.fill(uopSize)(false.B)))
  val uopFinish = RegInit(VecInit(Seq.fill(uopSize)(false.B)))
  val needRSReplay = RegInit(VecInit(Seq.fill(uopSize)(false.B)))
  // enq, from splitPipeline
  // val allowEnqueue =
  val cancelEnq = io.fromSplit.map(_.req.bits.uop.robIdx.needFlush(io.redirect))
  val canEnqueue = io.fromSplit.map(_.req.valid)
  val needEnqueue = (0 until enqWidth).map{i =>
    canEnqueue(i) && !cancelEnq(i)
  }

  // Number of free entries (validCount counts allocated ones).
  val freeCount = uopSize.U - freeList.io.validCount

  for ((enq, i) <- io.fromSplit.zipWithIndex){
    freeList.io.doAllocate(i) := false.B

    freeList.io.allocateReq(i) := true.B

    // Each enq port skips slots claimed by lower-indexed enqueues this cycle.
    val offset = PopCount(needEnqueue.take(i))
    // NOTE(review): canAccept is unused — readiness is derived from freeCount
    // below instead; confirm this is intentional (timing) and consider removing.
    val canAccept = freeList.io.canAllocate(offset)
    val enqIndex = freeList.io.allocateSlot(offset)
    enq.req.ready := freeCount >= (i + 1).U // for better timing

    when(needEnqueue(i) && enq.req.ready){
      freeList.io.doAllocate(i) := true.B
      // enqueue
      allocated(enqIndex) := true.B
      uopFinish(enqIndex) := false.B
      needRSReplay(enqIndex) := false.B

      EnqConnect(enq.req.bits, entries(enqIndex))// initial entry
    }

    enq.resp.bits.mBIndex := enqIndex
    enq.resp.bits.fail := false.B
    enq.resp.valid := freeCount >= (i + 1).U // for better timing
  }

  //redirect: cancel any allocated entry whose uop is being flushed.
  for (i <- 0 until uopSize){
    needCancel(i) := entries(i).uop.robIdx.needFlush(io.redirect) && allocated(i)
    when (needCancel(i)) {
      allocated(i) := false.B
      freeMaskVec(i) := true.B
      uopFinish(i) := false.B
      needRSReplay(i):= false.B
    }
  }
  freeList.io.free := freeMaskVec.asUInt
  //pipelineWriteback
  // handle the situation where multiple ports are going to write the same uop queue entry
  // select the oldest exception and count the flownum of the pipeline writeback.
  // mergePortMatrix(i)(j): writeback port j targets the same entry as port i
  // (j >= i), so port i is responsible for merging port j's contribution.
  val mergePortMatrix = Wire(Vec(pipeWidth, Vec(pipeWidth, Bool())))
  val mergePortMatrixHasExcp = Wire(Vec(pipeWidth, Vec(pipeWidth, Bool())))
  val mergedByPrevPortVec = Wire(Vec(pipeWidth, Bool()))
  (0 until pipeWidth).map{case i => (0 until pipeWidth).map{case j =>
    val mergePortValid = (j == i).B ||
      (j > i).B &&
      io.fromPipeline(j).bits.mBIndex === io.fromPipeline(i).bits.mBIndex &&
      io.fromPipeline(j).valid

    mergePortMatrix(i)(j) := mergePortValid
    mergePortMatrixHasExcp(i)(j) := mergePortValid && io.fromPipeline(j).bits.hasException
  }}
  // mergedByPrevPortVec(i): a lower-indexed port already covers entry i's merge
  // this cycle, so port i must not update the entry itself.
  (0 until pipeWidth).map{case i =>
    mergedByPrevPortVec(i) := (i != 0).B && Cat((0 until i).map(j =>
      io.fromPipeline(j).bits.mBIndex === io.fromPipeline(i).bits.mBIndex &&
      io.fromPipeline(j).valid)).orR
  }

  if (backendParams.debugEn){
    dontTouch(mergePortMatrix)
    dontTouch(mergedByPrevPortVec)
  }

  // for exception, select exception, when multi port writeback exception, we need select oldest one
  /**
    * Recursive divide-and-conquer select of the valid element with the smallest
    * `sel` key (element index) — i.e. the architecturally oldest exception.
    * Elaboration-time recursion; returns 1-element Seqs once reduced.
    */
  def selectOldest[T <: VecPipelineFeedbackIO](valid: Seq[Bool], bits: Seq[T], sel: Seq[UInt]): (Seq[Bool], Seq[T], Seq[UInt]) = {
    assert(valid.length == bits.length)
    assert(valid.length == sel.length)
    if (valid.length == 0 || valid.length == 1) {
      (valid, bits, sel)
    } else if (valid.length == 2) {
      val res = Seq.fill(2)(Wire(ValidIO(chiselTypeOf(bits(0)))))
      for (i <- res.indices) {
        res(i).valid := valid(i)
        res(i).bits := bits(i)
      }
      // Both valid: pick the smaller key; otherwise pick whichever is valid
      // (falls back to res(1)/sel(1) when neither is valid — result unused then).
      val oldest = Mux(valid(0) && valid(1),
        Mux(sel(0) < sel(1),
          res(0), res(1)),
        Mux(valid(0) && !valid(1), res(0), res(1)))

      val oldidx = Mux(valid(0) && valid(1),
        Mux(sel(0) < sel(1),
          sel(0), sel(1)),
        Mux(valid(0) && !valid(1), sel(0), sel(1)))
      (Seq(oldest.valid), Seq(oldest.bits), Seq(oldidx))
    } else {
      val left = selectOldest(valid.take(valid.length / 2), bits.take(bits.length / 2), sel.take(sel.length / 2))
      val right = selectOldest(valid.takeRight(valid.length - (valid.length / 2)), bits.takeRight(bits.length - (bits.length / 2)), sel.takeRight(sel.length - (sel.length / 2)))
      selectOldest(left._1 ++ right._1, left._2 ++ right._2, left._3 ++ right._3)
    }
  }

  val pipeValid = io.fromPipeline.map(_.valid)
  val pipeBits = io.fromPipeline.map(_.bits)
  val wbElemIdx = pipeBits.map(_.elemIdx)
  val wbMbIndex = pipeBits.map(_.mBIndex)
  // Element index within the current field; relies on vlmax being a power of two.
  val wbElemIdxInField = wbElemIdx.zip(wbMbIndex).map(x => x._1 & (entries(x._2).vlmax - 1.U))

  // this port have exception or merged port have exception
  val portHasExcp = mergePortMatrixHasExcp.map{_.reduce(_ || _)}

  // Exception recording: for each pipeline port, pick the oldest exception among
  // the ports it merges, and update the entry when it is older than (or there is
  // no) previously recorded exception.
  for((pipewb, i) <- io.fromPipeline.zipWithIndex){
    val entry = entries(wbMbIndex(i))
    val entryVeew = entry.uop.vpu.veew
    val entryIsUS = LSUOpType.isAllUS(entry.uop.fuOpType)
    val entryHasException = ExceptionNO.selectByFu(entry.exceptionVec, fuCfg).asUInt.orR
    val entryExcp = entryHasException && entry.mask.orR
    val entryVaddr = entry.vaddr
    val entryVstart = entry.vstart
    val entryElemIdx = entry.elemIdx

    val sel = selectOldest(mergePortMatrixHasExcp(i), pipeBits, wbElemIdxInField)
    val selPort = sel._2
    val selElemInfield = selPort(0).elemIdx & (entries(wbMbIndex(i)).vlmax - 1.U)
    val selExceptionVec = selPort(0).exceptionVec
    val selVaddr = selPort(0).vaddr
    val selElemIdx = selPort(0).elemIdx

    val isUSFirstUop = !selPort(0).elemIdx.orR
    // Only the first unaligned uop of unit-stride needs to be offset.
    // When unaligned, the lowest bit of mask is 0.
    // example: 16'b1111_1111_1111_0000
    val firstUnmask = genVFirstUnmask(selPort(0).mask).asUInt
    val vaddrOffset = Mux(entryIsUS, firstUnmask, 0.U)
    val vaddr = selVaddr + vaddrOffset
    val vstart = Mux(entryIsUS, selPort(0).vstart, selElemInfield)

    // select oldest port to raise exception
    when((((entryElemIdx >= selElemIdx) && entryExcp && portHasExcp(i)) || (!entryExcp && portHasExcp(i))) && pipewb.valid && !mergedByPrevPortVec(i)) {
      entry.uop.trigger := selPort(0).trigger
      entry.elemIdx := selElemIdx
      when(!entry.fof || vstart === 0.U){
        // For fof loads, if element 0 raises an exception, vl is not modified, and the trap is taken.
        entry.vstart := vstart
        entry.exceptionVec := ExceptionNO.selectByFu(selExceptionVec, fuCfg)
        entry.vaddr := vaddr
        entry.vaNeedExt := selPort(0).vaNeedExt
        entry.gpaddr := selPort(0).gpaddr
        entry.isForVSnonLeafPTE := selPort(0).isForVSnonLeafPTE
      }.otherwise{
        // fof load faulting past element 0: truncate vl instead of trapping.
        entry.uop.vpu.vta := VType.tu
        entry.vl := Mux(entry.vl < vstart, entry.vl, vstart)
      }
    }
  }

  // for pipeline writeback: decrement the entry's outstanding flow count by the
  // number of ports merged into this one.
  for((pipewb, i) <- io.fromPipeline.zipWithIndex){
    val wbIndex = pipewb.bits.mBIndex
    val flowNumOffset = PopCount(mergePortMatrix(i))
    val sourceTypeNext = entries(wbIndex).sourceType | pipewb.bits.sourceType
    val hasExp = ExceptionNO.selectByFu(pipewb.bits.exceptionVec, fuCfg).asUInt.orR

    // if is VLoad, need latch 1 cycle to merge data. only flowNum and wbIndex need to latch
    val latchWbValid = if(isVStore) pipewb.valid else RegNext(pipewb.valid)
    val latchWbIndex = if(isVStore) wbIndex else RegEnable(wbIndex, pipewb.valid)
    val latchFlowNum = if(isVStore) flowNumOffset else RegEnable(flowNumOffset, pipewb.valid)
    val latchMergeByPre = if(isVStore) mergedByPrevPortVec(i) else RegEnable(mergedByPrevPortVec(i), pipewb.valid)
    when(latchWbValid && !latchMergeByPre){
      entries(latchWbIndex).flowNum := entries(latchWbIndex).flowNum - latchFlowNum
    }

    when(pipewb.valid){
      entries(wbIndex).sourceType := sourceTypeNext
      entries(wbIndex).flushState := pipewb.bits.flushState
    }
    // A missed (not hit) flow marks the whole entry for RS replay.
    when(pipewb.valid && !pipewb.bits.hit){
      needRSReplay(wbIndex) := true.B
    }
    pipewb.ready := true.B
    // Underflow check: subtraction wrapping means more flows wrote back than were outstanding.
    XSError((entries(latchWbIndex).flowNum - latchFlowNum > entries(latchWbIndex).flowNum) && latchWbValid && !latchMergeByPre, s"entry: $latchWbIndex, FlowWriteback overflow!!\n")
    XSError(!allocated(latchWbIndex) && latchWbValid, s"entry: $latchWbIndex, Writeback error flow!!\n")
  }

  //uopwriteback(deq): an entry becomes deq-eligible once all flows returned and
  // it is not being cancelled this cycle.
  for (i <- 0 until uopSize){
    when(allocated(i) && entries(i).allReady() && !needCancel(i)){
      uopFinish(i) := true.B
    }
  }
  val selPolicy = SelectOne("circ", uopFinish, deqWidth) // select one entry to deq
  private val pipelineOut = Wire(Vec(deqWidth, DecoupledIO(new MemExuOutput(isVector = true))))
  private val writeBackOut = Wire(Vec(deqWidth, DecoupledIO(new MemExuOutput(isVector = true))))
  private val writeBackOutExceptionVec = writeBackOut.map(_.bits.uop.exceptionVec)
  for(((port, lsqport), i) <- (pipelineOut zip io.toLsq).zipWithIndex){
    val canGo = port.ready
    val (selValid, selOHVec) = selPolicy.getNthOH(i + 1)
    val entryIdx = OHToUInt(selOHVec)
    val selEntry = entries(entryIdx)
    val selAllocated = allocated(entryIdx)
    val selFire = selValid && canGo
    when(selFire){
      freeMaskVec(entryIdx) := selAllocated
      allocated(entryIdx) := false.B
      uopFinish(entryIdx) := false.B
      needRSReplay(entryIdx):= false.B
    }
    //writeback connect: suppressed when the entry must replay or is being flushed
    port.valid := selFire && selAllocated && !needRSReplay(entryIdx) && !selEntry.uop.robIdx.needFlush(io.redirect)
    port.bits := DeqConnect(selEntry)
    //to lsq
    lsqport.bits := ToLsqConnect(selEntry) // when uopwriteback, free MBuffer entry, write to lsq
    lsqport.valid:= selFire && selAllocated && !needRSReplay(entryIdx)
    //to RS: feedback fires even on replay (hit=false), one cycle later
    val feedbackOut = WireInit(0.U.asTypeOf(io.feedback(i).bits)).suggestName(s"feedbackOut_${i}")
    val feedbackValid = selFire && selAllocated
    feedbackOut.hit := !needRSReplay(entryIdx)
    feedbackOut.robIdx := selEntry.uop.robIdx
    feedbackOut.sourceType := selEntry.sourceType
    feedbackOut.flushState := selEntry.flushState
    feedbackOut.dataInvalidSqIdx := DontCare
    feedbackOut.sqIdx := selEntry.uop.sqIdx
    feedbackOut.lqIdx := selEntry.uop.lqIdx

    io.feedback(i).valid := RegNext(feedbackValid)
    io.feedback(i).bits := RegEnable(feedbackOut, feedbackValid)

    // Decouple writeback with a pipeline stage; flush in-flight uops on redirect.
    NewPipelineConnect(
      port, writeBackOut(i), writeBackOut(i).fire,
      Mux(port.fire,
        selEntry.uop.robIdx.needFlush(io.redirect),
        writeBackOut(i).bits.uop.robIdx.needFlush(io.redirect)),
      Option(s"VMergebufferPipelineConnect${i}")
    )
    io.uopWriteback(i) <> writeBackOut(i)
    // Final fu-specific exception filtering on the way out.
    io.uopWriteback(i).bits.uop.exceptionVec := ExceptionNO.selectByFu(writeBackOutExceptionVec(i), fuCfg)
  }

  QueuePerf(uopSize, freeList.io.validCount, freeList.io.validCount === 0.U)
}

/**
  * Load-side merge buffer: additionally merges returning load data into the
  * entry's `data` field, with a dedicated fast path for unit-stride loads.
  */
class VLMergeBufferImp(implicit p: Parameters) extends BaseVMergeBuffer(isVStore=false){
  override lazy val uopSize = VlMergeBufferSize
  println(s"VLMergeBuffer Size: ${VlMergeBufferSize}")
  override lazy val freeList = Module(new FreeList(
    size = uopSize,
    allocWidth = VecLoadPipelineWidth,
    freeWidth = deqWidth,
    enablePreAlloc = false,
    moduleName = "VLoad MergeBuffer freelist"
  ))
  // Back-pressure hint to the split unit when nearly full.
  // NOTE(review): threshold 6 is a magic number — presumably max in-flight split
  // requests; confirm against the split unit.
  io.toSplit.get.threshold := freeCount <= 6.U

  //merge data
  val flowWbElemIdx = Wire(Vec(pipeWidth, UInt(elemIdxBits.W)))
  val flowWbElemIdxInVd = Wire(Vec(pipeWidth, UInt(elemIdxBits.W)))
  val pipewbValidReg = Wire(Vec(pipeWidth, Bool()))
  val wbIndexReg = Wire(Vec(pipeWidth, UInt(vlmBindexBits.W)))
  val mergeDataReg = Wire(Vec(pipeWidth, UInt(VLEN.W)))

  // Zero out bytes whose element raised a (non-breakpoint) exception, or bytes
  // masked off by the trigger mask when a trigger fired.
  val maskWithexceptionMask = io.fromPipeline.map{ x=>
    Mux(
      TriggerAction.isExp(x.bits.trigger) || TriggerAction.isDmode(x.bits.trigger),
      ~x.bits.vecTriggerMask,
      Fill(x.bits.mask.getWidth, !ExceptionNO.selectByFuAndUnSelect(x.bits.exceptionVec, fuCfg, Seq(breakPoint)).asUInt.orR)
    ).asUInt & x.bits.mask
  }

  for((pipewb, i) <- io.fromPipeline.zipWithIndex){
    /** step0: combinational merge of this cycle's writebacks **/
    val wbIndex = pipewb.bits.mBIndex
    val alignedType = pipewb.bits.alignedType
    val elemIdxInsideVd = pipewb.bits.elemIdxInsideVd
    flowWbElemIdx(i) := pipewb.bits.elemIdx
    flowWbElemIdxInVd(i) := elemIdxInsideVd.get

    // Forward last cycle's merged data if another port wrote the same entry,
    // avoiding a read-after-write hazard on entries(wbIndex).data.
    // NOTE(review): hard-codes ports 0..2 — assumes pipeWidth == 3; confirm.
    val oldData = PriorityMux(Seq(
      (pipewbValidReg(0) && (wbIndexReg(0) === wbIndex)) -> mergeDataReg(0),
      (pipewbValidReg(1) && (wbIndexReg(1) === wbIndex)) -> mergeDataReg(1),
      (pipewbValidReg(2) && (wbIndexReg(2) === wbIndex)) -> mergeDataReg(2),
      true.B -> entries(wbIndex).data // default use entries_data
    ))
    val mergedData = mergeDataWithElemIdx(
      oldData = oldData,
      newData = io.fromPipeline.map(_.bits.vecdata.get),
      alignedType = alignedType(1,0),
      elemIdx = flowWbElemIdxInVd,
      valids = mergePortMatrix(i)
    )
    /* this only for unit-stride load data merge
     * cycle0: broaden 128-bits to 256-bits (max 6 to 1)
     * cycle1: select 128-bits data from 256-bits (16 to 1)
     */
    val (brodenMergeData, brodenMergeMask) = mergeDataByIndex(
      data = io.fromPipeline.map(_.bits.vecdata.get).drop(i),
      mask = maskWithexceptionMask.drop(i),
      index = io.fromPipeline(i).bits.elemIdxInsideVd.get,
      valids = mergePortMatrix(i).drop(i)
    )
    /** step1: registered, write back into the entry **/
    pipewbValidReg(i) := RegNext(pipewb.valid)
    wbIndexReg(i) := RegEnable(wbIndex, pipewb.valid)
    mergeDataReg(i) := RegEnable(mergedData, pipewb.valid) // for not Unit-stride
    val brodenMergeDataReg = RegEnable(brodenMergeData, pipewb.valid) // only for Unit-stride
    val brodenMergeMaskReg = RegEnable(brodenMergeMask, pipewb.valid)
    val mergedByPrevPortReg = RegEnable(mergedByPrevPortVec(i), pipewb.valid)
    val regOffsetReg = RegEnable(pipewb.bits.reg_offset.get, pipewb.valid) // only for Unit-stride
    val isusMerge = RegEnable(alignedType(2), pipewb.valid)

    // Unit-stride: pick the 128-bit window at the byte offset, then merge byte-wise.
    val usSelData = Mux1H(UIntToOH(regOffsetReg), (0 until VLENB).map{case i => getNoAlignedSlice(brodenMergeDataReg, i, 128)})
    val usSelMask = Mux1H(UIntToOH(regOffsetReg), (0 until VLENB).map{case i => brodenMergeMaskReg(16 + i - 1, i)})
    val usMergeData = mergeDataByByte(entries(wbIndexReg(i)).data, usSelData, usSelMask)
    when(pipewbValidReg(i) && !mergedByPrevPortReg){
      entries(wbIndexReg(i)).data := Mux(isusMerge, usMergeData, mergeDataReg(i)) // if aligned(2) == 1, is Unit-Stride inst
    }
  }
}

/**
  * Store-side merge buffer: no data merging is needed (stores return no data),
  * so only completion tracking, exception reporting and RS feedback apply.
  */
class VSMergeBufferImp(implicit p: Parameters) extends BaseVMergeBuffer(isVStore=true){
  override lazy val uopSize = VsMergeBufferSize
  println(s"VSMergeBuffer Size: ${VsMergeBufferSize}")
  override lazy val freeList = Module(new FreeList(
    size = uopSize,
    allocWidth = VecStorePipelineWidth,
    freeWidth = deqWidth,
    enablePreAlloc = false,
    moduleName = "VStore MergeBuffer freelist"
  ))

  /**
    * Store-flavored deq packing: data/mask/vdIdx are meaningless for stores and
    * left as DontCare. exceptionVec is forwarded unfiltered here — the base
    * class applies ExceptionNO.selectByFu again at io.uopWriteback.
    */
  override def DeqConnect(source: MBufferBundle): MemExuOutput = {
    val sink = Wire(new MemExuOutput(isVector = true))
    sink.data := DontCare
    sink.mask.get := DontCare
    sink.uop := source.uop
    sink.uop.exceptionVec := source.exceptionVec
    sink.debug := 0.U.asTypeOf(new DebugBundle)
    sink.vdIdxInField.get := DontCare
    sink.vdIdx.get := DontCare
    sink.isFromLoadUnit := DontCare
    sink.uop.vpu.vstart := source.vstart
    sink
  }

  // from misalignBuffer flush: mark the named entry for RS replay (this clears
  // feedback.hit and suppresses its writeback/LSQ ports in the base class).
  when(io.fromMisalignBuffer.get.flush){
    needRSReplay(io.fromMisalignBuffer.get.mbIndex) := true.B
  }
}
