/***************************************************************************************
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan._
import xiangshan.backend.rob.RobPtr
import xiangshan.backend.Bundles._
import xiangshan.mem._
import xiangshan.backend.fu.FuType
import xiangshan.backend.fu.FuConfig._
import xiangshan.backend.datapath.NewPipelineConnect
import freechips.rocketchip.diplomacy.BufferParams

/**
 * State held for one in-flight vector uop inside the merge buffer.
 *
 * An entry is allocated when a split uop enters the buffer and is freed when
 * the uop writes back (or is flushed by redirect). `flowNum` counts the
 * flows (element accesses) still outstanding for this uop; the entry is
 * ready to write back once it reaches zero (see `allReady`).
 */
class MBufferBundle(implicit p: Parameters) extends VLSUBundle{
  // merged element data for this uop (load path fills this in; VLEN bits)
  val data = UInt(VLEN.W)
  // byte mask of valid data bytes
  val mask = UInt(VLENB.W)
  // number of flows not yet written back; decremented as pipeline writebacks arrive
  val flowNum = UInt(flowIdxBits.W)
  val exceptionVec = ExceptionVec()
  val uop = new DynInst
  // val vdOffset = UInt(vOffsetBits.W)
  // accumulated feedback type (OR of per-flow sourceType reported by the pipeline)
  val sourceType = VSFQFeedbackType()
  val flushState = Bool()
  // index of the destination vector register within the register group
  val vdIdx = UInt(3.W)
  // for exception reporting: element index / effective vl / faulting addresses
  val vstart = UInt(elemIdxBits.W)
  val vl = UInt(elemIdxBits.W)
  val vaddr = UInt(XLEN.W)
  val gpaddr = UInt(GPAddrBits.W)
  val isForVSnonLeafPTE= Bool()
  // fault-only-first load: exceptions past element 0 shrink vl instead of trapping
  val fof = Bool()
  // VLMAX for this uop's configuration; used to reduce elemIdx into the field
  val vlmax = UInt(elemIdxBits.W)

  // entry is complete when no flows remain outstanding
  def allReady(): Bool = (flowNum === 0.U)
}

/**
 * Merge buffer shared by vector loads and stores (`isVStore` selects the
 * variant). It accepts split uops from the split pipeline, collects the
 * per-flow writebacks from the memory pipelines (merging multiple ports that
 * target the same entry in one cycle), tracks the oldest exception per uop,
 * and finally writes the completed uop back to the backend / LSQ / RS.
 *
 * Concrete subclasses provide `freeList` and `uopSize`
 * (see VLMergeBufferImp / VSMergeBufferImp below).
 */
abstract class BaseVMergeBuffer(isVStore: Boolean=false)(implicit p: Parameters) extends VLSUModule{
  val io = IO(new VMergeBufferIO(isVStore))

  // freeList: tracks which entry indices are available for allocation.
  // +---+---+--------------+-----+-----+
  // | 0 | 1 |   ......     | n-2 | n-1 |
  // +---+---+--------------+-----+-----+
  val freeList: FreeList
  val uopSize: Int
  val enqWidth = io.fromSplit.length
  val deqWidth = io.uopWriteback.length
  val pipeWidth = io.fromPipeline.length
  // exception filtering is done against the store/load FU's exception set
  lazy val fuCfg = if (isVStore) VstuCfg else VlduCfg

  // Initialize a freshly allocated entry from the split-pipeline request.
  // Exception state starts clean; vl is captured from the uop's vpu config.
  def EnqConnect(source: MergeBufferReq, sink: MBufferBundle) = {
    sink.data := source.data
    sink.mask := source.mask
    sink.flowNum := source.flowNum
    sink.exceptionVec := ExceptionNO.selectByFu(0.U.asTypeOf(ExceptionVec()), fuCfg)
    sink.uop := source.uop
    sink.sourceType := 0.U.asTypeOf(VSFQFeedbackType())
    sink.flushState := false.B
    sink.vdIdx := source.vdIdx
    sink.fof := source.fof
    sink.vlmax := source.vlmax
    sink.vl := source.uop.vpu.vl
    sink.vstart := 0.U
  }
  // Build the backend writeback payload from a completed entry.
  // Overridden by VSMergeBufferImp (stores carry no data/mask back).
  def DeqConnect(source: MBufferBundle): MemExuOutput = {
    val sink = WireInit(0.U.asTypeOf(new MemExuOutput(isVector = true)))
    sink.data := source.data
    sink.mask.get := source.mask
    sink.uop := source.uop
    sink.uop.exceptionVec := ExceptionNO.selectByFu(source.exceptionVec, fuCfg)
    sink.uop.vpu.vmask := source.mask
    sink.debug := 0.U.asTypeOf(new DebugBundle)
    sink.vdIdxInField.get := source.vdIdx // Mgu needs to use this.
    sink.vdIdx.get := source.vdIdx
    sink.uop.vpu.vstart := source.vstart
    sink.uop.vpu.vl := source.vl
    sink
  }
  // Build the LSQ feedback payload: COMMIT when no (FU-relevant) exception
  // was recorded for the entry, FLUSH otherwise; LAST is always asserted.
  def ToLsqConnect(source: MBufferBundle): FeedbackToLsqIO = {
    val sink = WireInit(0.U.asTypeOf(new FeedbackToLsqIO))
    val hasExp = ExceptionNO.selectByFu(source.exceptionVec, fuCfg).asUInt.orR
    sink.robidx := source.uop.robIdx
    sink.uopidx := source.uop.uopIdx
    sink.feedback(VecFeedbacks.COMMIT) := !hasExp
    sink.feedback(VecFeedbacks.FLUSH) := hasExp
    sink.feedback(VecFeedbacks.LAST) := true.B
    sink.vstart := source.vstart // TODO: if lsq need vl for fof?
    sink.vaddr := source.vaddr
    sink.gpaddr := source.gpaddr
    sink.isForVSnonLeafPTE := source.isForVSnonLeafPTE
    sink.vl := source.vl
    sink.exceptionVec := ExceptionNO.selectByFu(source.exceptionVec, fuCfg)
    sink
  }


  // Entry storage and per-entry status bits.
  val entries = Reg(Vec(uopSize, new MBufferBundle))
  val needCancel = WireInit(VecInit(Seq.fill(uopSize)(false.B)))
  val allocated = RegInit(VecInit(Seq.fill(uopSize)(false.B)))
  val freeMaskVec = WireInit(VecInit(Seq.fill(uopSize)(false.B)))
  val uopFinish = RegInit(VecInit(Seq.fill(uopSize)(false.B)))
  val needRSReplay = RegInit(VecInit(Seq.fill(uopSize)(false.B)))

  // --- enqueue, from the split pipeline ---
  // A request is dropped (not enqueued) when it is already being flushed.
  // val allowEnqueue =
  val cancelEnq = io.fromSplit.map(_.req.bits.uop.robIdx.needFlush(io.redirect))
  val canEnqueue = io.fromSplit.map(_.req.valid)
  val needEnqueue = (0 until enqWidth).map{i =>
    canEnqueue(i) && !cancelEnq(i)
  }

  val freeCount = uopSize.U - freeList.io.validCount

  for ((enq, i) <- io.fromSplit.zipWithIndex){
    freeList.io.doAllocate(i) := false.B

    freeList.io.allocateReq(i) := true.B

    // each port claims the slot offset by the number of earlier ports that enqueue
    val offset = PopCount(needEnqueue.take(i))
    // NOTE(review): canAccept is computed but unused — readiness is derived
    // from freeCount instead (see "for better timing" below); confirm intended.
    val canAccept = freeList.io.canAllocate(offset)
    val enqIndex = freeList.io.allocateSlot(offset)
    enq.req.ready := freeCount >= (i + 1).U // for better timing

    when(needEnqueue(i) && enq.req.ready){
      freeList.io.doAllocate(i) := true.B
      // enqueue: mark allocated and clear per-entry status
      allocated(enqIndex) := true.B
      uopFinish(enqIndex) := false.B
      needRSReplay(enqIndex) := false.B

      EnqConnect(enq.req.bits, entries(enqIndex))// initial entry
    }

    // response tells the split pipeline which entry index was granted
    enq.resp.bits.mBIndex := enqIndex
    enq.resp.bits.fail := false.B
    enq.resp.valid := freeCount >= (i + 1).U // for better timing
  }

  // --- redirect: flush entries whose uop is squashed and return them to the free list ---
  for (i <- 0 until uopSize){
    needCancel(i) := entries(i).uop.robIdx.needFlush(io.redirect) && allocated(i)
    when (needCancel(i)) {
      allocated(i) := false.B
      freeMaskVec(i) := true.B
      uopFinish(i) := false.B
      needRSReplay(i):= false.B
    }
  }
  freeList.io.free := freeMaskVec.asUInt

  // --- pipeline writeback ---
  // Handle the situation where multiple ports write the same uop entry in one
  // cycle: mergePortMatrix(i)(j) marks port j's writeback as merged into port i
  // (j >= i, same mBIndex, j valid); mergedByPrevPortVec(i) marks port i as
  // already absorbed by some earlier port, so it must not update the entry itself.
  val mergePortMatrix = Wire(Vec(pipeWidth, Vec(pipeWidth, Bool())))
  val mergedByPrevPortVec = Wire(Vec(pipeWidth, Bool()))
  (0 until pipeWidth).map{case i => (0 until pipeWidth).map{case j =>
    mergePortMatrix(i)(j) := (j == i).B ||
      (j > i).B &&
      io.fromPipeline(j).bits.mBIndex === io.fromPipeline(i).bits.mBIndex &&
      io.fromPipeline(j).valid
  }}
  (0 until pipeWidth).map{case i =>
    mergedByPrevPortVec(i) := (i != 0).B && Cat((0 until i).map(j =>
      io.fromPipeline(j).bits.mBIndex === io.fromPipeline(i).bits.mBIndex &&
      io.fromPipeline(j).valid)).orR
  }
  dontTouch(mergePortMatrix)
  dontTouch(mergedByPrevPortVec)

  // For exception selection: when multiple ports write back an exception in the
  // same cycle, recursively reduce to the single oldest one (smallest `sel`,
  // i.e. smallest in-field element index).
  def selectOldest[T <: VecPipelineFeedbackIO](valid: Seq[Bool], bits: Seq[T], sel: Seq[UInt]): (Seq[Bool], Seq[T], Seq[UInt]) = {
    assert(valid.length == bits.length)
    assert(valid.length == sel.length)
    if (valid.length == 0 || valid.length == 1) {
      (valid, bits, sel)
    } else if (valid.length == 2) {
      val res = Seq.fill(2)(Wire(ValidIO(chiselTypeOf(bits(0)))))
      for (i <- res.indices) {
        res(i).valid := valid(i)
        res(i).bits := bits(i)
      }
      // both valid -> smaller sel wins; otherwise take whichever is valid
      val oldest = Mux(valid(0) && valid(1),
        Mux(sel(0) < sel(1),
          res(0), res(1)),
        Mux(valid(0) && !valid(1), res(0), res(1)))
      (Seq(oldest.valid), Seq(oldest.bits), Seq(0.U))
    } else {
      // divide and conquer over the port list
      val left = selectOldest(valid.take(valid.length / 2), bits.take(bits.length / 2), sel.take(sel.length / 2))
      val right = selectOldest(valid.takeRight(valid.length - (valid.length / 2)), bits.takeRight(bits.length - (bits.length / 2)), sel.takeRight(sel.length - (sel.length / 2)))
      selectOldest(left._1 ++ right._1, left._2 ++ right._2, left._3 ++ right._3)
    }
  }

  val pipeValid = io.fromPipeline.map(_.valid)
  val pipeBits = io.fromPipeline.map(x => x.bits)
  val wbElemIdx = pipeBits.map(_.elemIdx)
  val wbMbIndex = pipeBits.map(_.mBIndex)
  // element index reduced modulo vlmax (vlmax assumed power of two, hence the mask)
  val wbElemIdxInField = wbElemIdx.zip(wbMbIndex).map(x => x._1 & (entries(x._2).vlmax - 1.U))

  // portHasExcp(i): this port, or any port merged into it, reports an
  // FU-relevant exception on an active (masked-in) element
  val portHasExcp = pipeBits.zip(mergePortMatrix).map{case (port, v) =>
    (0 until pipeWidth).map{case i =>
      val pipeHasExcep = ExceptionNO.selectByFu(io.fromPipeline(i).bits.exceptionVec, fuCfg).asUInt.orR
      (v(i) && pipeHasExcep && io.fromPipeline(i).bits.mask.orR) // this port or a merged port has an exception
    }.reduce(_ || _)
  }

  // --- exception recording: keep only the oldest faulting element per entry ---
  for((pipewb, i) <- io.fromPipeline.zipWithIndex){
    val entry = entries(wbMbIndex(i))
    val entryVeew = entry.uop.vpu.veew
    val entryIsUS = LSUOpType.isAllUS(entry.uop.fuOpType)
    val entryHasException = ExceptionNO.selectByFu(entry.exceptionVec, fuCfg).asUInt.orR
    val entryExcp = entryHasException && entry.mask.orR

    // oldest exception among this port and all ports merged into it
    val sel = selectOldest(mergePortMatrix(i), pipeBits, wbElemIdxInField)
    val selPort = sel._2
    val selElemInfield = selPort(0).elemIdx & (entries(wbMbIndex(i)).vlmax - 1.U)
    val selExceptionVec = selPort(0).exceptionVec

    val isUSFirstUop = !selPort(0).elemIdx.orR
    // Only the first unaligned uop of a unit-stride access needs an address offset.
    // When unaligned, the low bits of the mask are 0,
    //  example: 16'b1111_1111_1111_0000 — the offset is the first set bit.
    val vaddrOffset = Mux(entryIsUS && isUSFirstUop, genVFirstUnmask(selPort(0).mask).asUInt, 0.U)
    val vaddr = selPort(0).vaddr + vaddrOffset

    // Record the exception only if it is older than (or the entry has none):
    // either the entry already faults at a later/equal element, or it has no fault yet.
    when((((entries(wbMbIndex(i)).vstart >= selElemInfield) && entryExcp && portHasExcp(i)) || (!entryExcp && portHasExcp(i))) && pipewb.valid && !mergedByPrevPortVec(i)){
      when(!entries(wbMbIndex(i)).fof || selElemInfield === 0.U){
        // For fof loads, if element 0 raises an exception, vl is not modified, and the trap is taken.
        entries(wbMbIndex(i)).vstart := selElemInfield
        entries(wbMbIndex(i)).exceptionVec := ExceptionNO.selectByFu(selExceptionVec, fuCfg)
        entries(wbMbIndex(i)).vaddr := vaddr
        entries(wbMbIndex(i)).gpaddr := selPort(0).gpaddr
        entries(wbMbIndex(i)).isForVSnonLeafPTE := selPort(0).isForVSnonLeafPTE
      }.otherwise{
        // fof fault past element 0: shrink vl to the faulting element, no trap
        entries(wbMbIndex(i)).vl := selElemInfield
      }
    }
  }

  // --- flow accounting for pipeline writeback ---
  for((pipewb, i) <- io.fromPipeline.zipWithIndex){
    val wbIndex = pipewb.bits.mBIndex
    // usSecondInv: second half of a unit-stride access was invalidated, still counts 2 flows
    val flowNumOffset = Mux(pipewb.bits.usSecondInv,
      2.U,
      PopCount(mergePortMatrix(i)))
    val sourceTypeNext = entries(wbIndex).sourceType | pipewb.bits.sourceType
    // NOTE(review): hasExp is computed here but not used in this loop — confirm intended.
    val hasExp = ExceptionNO.selectByFu(pipewb.bits.exceptionVec, fuCfg).asUInt.orR

    // For vector loads, the flowNum update is delayed one cycle so it lands in the
    // same cycle as the (registered) data merge; only flowNum and wbIndex need latching.
    // Stores update combinationally.
    val latchWbValid = if(isVStore) pipewb.valid else RegNext(pipewb.valid)
    val latchWbIndex = if(isVStore) wbIndex else RegEnable(wbIndex, pipewb.valid)
    val latchFlowNum = if(isVStore) flowNumOffset else RegEnable(flowNumOffset, pipewb.valid)
    val latchMergeByPre = if(isVStore) mergedByPrevPortVec(i) else RegEnable(mergedByPrevPortVec(i), pipewb.valid)
    when(latchWbValid && !latchMergeByPre){
      entries(latchWbIndex).flowNum := entries(latchWbIndex).flowNum - latchFlowNum
    }

    when(pipewb.valid){
      entries(wbIndex).sourceType := sourceTypeNext
      entries(wbIndex).flushState := pipewb.bits.flushState
    }
    // a missed flow requires the whole uop to be replayed from RS
    when(pipewb.valid && !pipewb.bits.hit){
      needRSReplay(wbIndex) := true.B
    }
    pipewb.ready := true.B
    // underflow check: subtraction must not wrap
    XSError((entries(latchWbIndex).flowNum - latchFlowNum > entries(latchWbIndex).flowNum) && latchWbValid && !latchMergeByPre, "FlowWriteback overflow!!\n")
    XSError(!allocated(latchWbIndex) && latchWbValid, "Writeback error flow!!\n")
  }
  // for in-order memory access (unused here)
  io.toSplit := DontCare

  // --- uop writeback (dequeue) ---
  // an entry becomes finishable once all its flows have written back
  for (i <- 0 until uopSize){
    when(allocated(i) && entries(i).allReady()){
      uopFinish(i) := true.B
    }
  }
  val selPolicy = SelectOne("circ", uopFinish, deqWidth) // circular-priority select of entries to dequeue
  private val pipelineOut = Wire(Vec(deqWidth, DecoupledIO(new MemExuOutput(isVector = true))))
  private val writeBackOut = Wire(Vec(deqWidth, DecoupledIO(new MemExuOutput(isVector = true))))
  private val writeBackOutExceptionVec = writeBackOut.map(_.bits.uop.exceptionVec)
  for(((port, lsqport), i) <- (pipelineOut zip io.toLsq).zipWithIndex){
    val canGo = port.ready
    val (selValid, selOHVec) = selPolicy.getNthOH(i + 1)
    val entryIdx = OHToUInt(selOHVec)
    val selEntry = entries(entryIdx)
    val selAllocated = allocated(entryIdx)
    val selFire = selValid && canGo
    when(selFire){
      // free the entry and clear its status on dequeue
      freeMaskVec(entryIdx) := selAllocated
      allocated(entryIdx) := false.B
      uopFinish(entryIdx) := false.B
      needRSReplay(entryIdx):= false.B
    }
    // writeback connect: suppress writeback for replayed or flushed uops
    port.valid := selFire && selAllocated && !needRSReplay(entryIdx) && !selEntry.uop.robIdx.needFlush(io.redirect)
    port.bits := DeqConnect(selEntry)
    // to lsq: when a uop writes back, free the MBuffer entry and notify the LSQ
    lsqport.bits := ToLsqConnect(selEntry)
    lsqport.valid:= selFire && selAllocated && !needRSReplay(entryIdx)
    // to RS: feedback is registered one cycle
    val feedbackOut = WireInit(0.U.asTypeOf(io.feedback(i).bits)).suggestName(s"feedbackOut_${i}")
    val feedbackValid = selFire && selAllocated
    feedbackOut.hit := !needRSReplay(entryIdx)
    feedbackOut.robIdx := selEntry.uop.robIdx
    feedbackOut.sourceType := selEntry.sourceType
    feedbackOut.flushState := selEntry.flushState
    feedbackOut.dataInvalidSqIdx := DontCare
    feedbackOut.sqIdx := selEntry.uop.sqIdx
    feedbackOut.lqIdx := selEntry.uop.lqIdx

    io.feedback(i).valid := RegNext(feedbackValid)
    io.feedback(i).bits := RegEnable(feedbackOut, feedbackValid)

    // pipeline stage between selection and writeback, flushed on redirect
    NewPipelineConnect(
      port, writeBackOut(i), writeBackOut(i).fire,
      Mux(port.fire,
        selEntry.uop.robIdx.needFlush(io.redirect),
        writeBackOut(i).bits.uop.robIdx.needFlush(io.redirect)),
      Option(s"VMergebufferPipelineConnect${i}")
    )
    io.uopWriteback(i) <> writeBackOut(i)
    // re-filter the exception vector at the output boundary
    io.uopWriteback(i).bits.uop.exceptionVec := ExceptionNO.selectByFu(writeBackOutExceptionVec(i), fuCfg)
  }

  QueuePerf(uopSize, freeList.io.validCount, freeList.io.validCount === 0.U)
}

/**
 * Vector-load merge buffer. In addition to the base bookkeeping it merges
 * the per-flow load data returned by the pipelines into each entry's `data`
 * field, with a dedicated two-cycle path for unit-stride loads.
 */
class VLMergeBufferImp(implicit p: Parameters) extends BaseVMergeBuffer(isVStore=false){
  override lazy val uopSize = VlMergeBufferSize
  println(s"VLMergeBuffer Size: ${VlMergeBufferSize}")
  override lazy val freeList = Module(new FreeList(
    size = uopSize,
    allocWidth = VecLoadPipelineWidth,
    freeWidth = deqWidth,
    enablePreAlloc = false,
    moduleName = "VLoad MergeBuffer freelist"
  ))

  // --- load-data merge path ---
  val flowWbElemIdx = Wire(Vec(pipeWidth, UInt(elemIdxBits.W)))
  val flowWbElemIdxInVd = Wire(Vec(pipeWidth, UInt(elemIdxBits.W)))
  val pipewbValidReg = Wire(Vec(pipeWidth, Bool()))
  val wbIndexReg = Wire(Vec(pipeWidth, UInt(vlmBindexBits.W)))
  val mergeDataReg = Wire(Vec(pipeWidth, UInt(VLEN.W)))

  for((pipewb, i) <- io.fromPipeline.zipWithIndex){
    /** step0: combinational merge of this cycle's writeback data **/
    val wbIndex = pipewb.bits.mBIndex
    val alignedType = pipewb.bits.alignedType
    val elemIdxInsideVd = pipewb.bits.elemIdxInsideVd
    flowWbElemIdx(i) := pipewb.bits.elemIdx
    flowWbElemIdxInVd(i) := elemIdxInsideVd.get

    // Forwarding: if another port updated the same entry last cycle, its
    // registered merge result is newer than entries(wbIndex).data.
    // NOTE(review): explicitly indexes ports 0..2 — assumes pipeWidth == 3; confirm.
    val oldData = PriorityMux(Seq(
      (pipewbValidReg(0) && (wbIndexReg(0) === wbIndex)) -> mergeDataReg(0),
      (pipewbValidReg(1) && (wbIndexReg(1) === wbIndex)) -> mergeDataReg(1),
      (pipewbValidReg(2) && (wbIndexReg(2) === wbIndex)) -> mergeDataReg(2),
      true.B -> entries(wbIndex).data // default: use the stored entry data
    ))
    val mergedData = mergeDataWithElemIdx(
      oldData = oldData,
      newData = io.fromPipeline.map(_.bits.vecdata.get),
      alignedType = alignedType(1,0),
      elemIdx = flowWbElemIdxInVd,
      valids = mergePortMatrix(i)
    )
    /* this path is only for unit-stride load data merge:
     * cycle0: broaden 128-bit lanes into a 256-bit window (max 6 to 1)
     * cycle1: select the 128-bit slice out of the 256-bit window (16 to 1)
     */
    val (brodenMergeData, brodenMergeMask) = mergeDataByIndex(
      data = io.fromPipeline.map(_.bits.vecdata.get).drop(i),
      mask = io.fromPipeline.map(_.bits.mask).drop(i),
      index = io.fromPipeline(i).bits.elemIdxInsideVd.get,
      valids = mergePortMatrix(i).drop(i)
    )
    /** step1: registered results and final entry update **/
    pipewbValidReg(i) := RegNext(pipewb.valid)
    wbIndexReg(i) := RegEnable(wbIndex, pipewb.valid)
    mergeDataReg(i) := RegEnable(mergedData, pipewb.valid) // for non-unit-stride
    val brodenMergeDataReg = RegEnable(brodenMergeData, pipewb.valid) // only for unit-stride
    val brodenMergeMaskReg = RegEnable(brodenMergeMask, pipewb.valid)
    val mergedByPrevPortReg = RegEnable(mergedByPrevPortVec(i), pipewb.valid)
    val regOffsetReg = RegEnable(pipewb.bits.reg_offset.get, pipewb.valid) // only for unit-stride
    val isusMerge = RegEnable(alignedType(2), pipewb.valid)

    // NOTE(review): the literals 16 and 128 here assume VLEN == 128 / VLENB == 16;
    // the inner `i` also shadows the loop's port index — confirm both are intended.
    val usSelData = Mux1H(UIntToOH(regOffsetReg), (0 until VLENB).map{case i => getNoAlignedSlice(brodenMergeDataReg, i, 128)})
    val usSelMask = Mux1H(UIntToOH(regOffsetReg), (0 until VLENB).map{case i => brodenMergeMaskReg(16 + i - 1, i)})
    val usMergeData = mergeDataByByte(entries(wbIndexReg(i)).data, usSelData, usSelMask)
    when(pipewbValidReg(i) && !mergedByPrevPortReg){
      entries(wbIndexReg(i)).data := Mux(isusMerge, usMergeData, mergeDataReg(i)) // if alignedType(2) == 1, this is a unit-stride inst
    }
  }
}

/**
 * Vector-store merge buffer. Stores carry no data back to the backend, so
 * DeqConnect is overridden to drop the data/mask fields.
 */
class VSMergeBufferImp(implicit p: Parameters) extends BaseVMergeBuffer(isVStore=true){
  override lazy val uopSize = VsMergeBufferSize
  println(s"VSMergeBuffer Size: ${VsMergeBufferSize}")
  override lazy val freeList = Module(new FreeList(
    size = uopSize,
    allocWidth = VecStorePipelineWidth,
    freeWidth = deqWidth,
    enablePreAlloc = false,
    moduleName = "VStore MergeBuffer freelist"
  ))
  override def DeqConnect(source: MBufferBundle): MemExuOutput = {
    val sink = Wire(new MemExuOutput(isVector = true))
    sink.data := DontCare
    sink.mask.get := DontCare
    sink.uop := source.uop
    sink.uop.exceptionVec := source.exceptionVec
    sink.debug := 0.U.asTypeOf(new DebugBundle)
    sink.vdIdxInField.get := DontCare
    sink.vdIdx.get := DontCare
    sink.uop.vpu.vstart := source.vstart
    sink
  }
}