/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan._
import xiangshan.backend.Bundles.{DynInst, MemExuOutput}
import xiangshan.cache._
import xiangshan.cache.{DCacheWordIO, DCacheLineIO, MemoryOpConstants}
import xiangshan.cache.mmu.{TlbRequestIO, TlbHintIO}
import xiangshan.mem._
import xiangshan.backend._
import xiangshan.backend.rob.RobLsqIO
import coupledL2.{CMOReq, CMOResp}
import xiangshan.backend.fu.FuType

class ExceptionAddrIO(implicit p: Parameters) extends XSBundle {
  val isStore = Input(Bool())
  val vaddr = Output(UInt(XLEN.W))
  val vaNeedExt = Output(Bool())
  val isHyper = Output(Bool())
  val vstart = Output(UInt((log2Up(VLEN) + 1).W))
  val vl = Output(UInt((log2Up(VLEN) + 1).W))
  val gpaddr = Output(UInt(XLEN.W))
  val isForVSnonLeafPTE = Output(Bool())
}

class FwdEntry extends Bundle {
  val validFast = Bool() // validFast is generated in the same cycle as the query
  val valid = Bool() // valid is generated 1 cycle after the query request
  val data = UInt(8.W) // data is generated 1 cycle after the query request
}

// in-flight miss block reqs
class InflightBlockInfo(implicit p: Parameters) extends XSBundle {
  val block_addr = UInt(PAddrBits.W)
  val valid = Bool()
}

class LsqEnqIO(implicit p: Parameters) extends MemBlockBundle {
  val canAccept = Output(Bool())
  val needAlloc = Vec(LSQEnqWidth, Input(UInt(2.W)))
  val req = Vec(LSQEnqWidth, Flipped(ValidIO(new DynInst)))
  val iqAccept = Input(Vec(LSQEnqWidth, Bool()))
  val resp = Vec(LSQEnqWidth, Output(new LSIdx))
}

// Load / Store Queue Wrapper for the XiangShan out-of-order LSU
class LsqWrapper(implicit p: Parameters) extends XSModule with HasDCacheParameters with HasPerfEvents {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(hartIdLen.W))
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val stvecFeedback = Vec(VecStorePipelineWidth, Flipped(ValidIO(new FeedbackToLsqIO)))
    val ldvecFeedback = Vec(VecLoadPipelineWidth, Flipped(ValidIO(new FeedbackToLsqIO)))
    val enq = new LsqEnqIO
    val ldu = new Bundle() {
      val stld_nuke_query = Vec(LoadPipelineWidth, Flipped(new LoadNukeQueryIO)) // from load_s2
      val ldld_nuke_query = Vec(LoadPipelineWidth, Flipped(new LoadNukeQueryIO)) // from load_s2
      val ldin = Vec(LoadPipelineWidth, Flipped(Decoupled(new LqWriteBundle))) // from load_s3
    }
    val sta = new Bundle() {
      val storeMaskIn = Vec(StorePipelineWidth, Flipped(Valid(new StoreMaskBundle))) // from store_s0, store mask, send to sq from rs
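      // Note: the store address pipeline feeds the store queue in stages: the
      // mask arrives at store_s0 (storeMaskIn above), the address at store_s1
      // (storeAddrIn below), and the address is delivered again at store_s2
      // (storeAddrInRe), presumably after further checks later in the pipeline.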
      val storeAddrIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // from store_s1
      val storeAddrInRe = Vec(StorePipelineWidth, Input(new LsPipelineBundle())) // from store_s2
    }
    val std = new Bundle() {
      val storeDataIn = Vec(StorePipelineWidth, Flipped(Valid(new MemExuOutput(isVector = true)))) // from store_s0, store data, send to sq from rs
    }
    val ldout = Vec(LoadPipelineWidth, DecoupledIO(new MemExuOutput))
    val ld_raw_data = Vec(LoadPipelineWidth, Output(new LoadDataFromLQBundle))
    val replay = Vec(LoadPipelineWidth, Decoupled(new LsPipelineBundle))
    val sbuffer = Vec(EnsbufferWidth, Decoupled(new DCacheWordReqWithVaddrAndPfFlag))
    val sbufferVecDifftestInfo = Vec(EnsbufferWidth, Decoupled(new DynInst)) // difftest info needed by vector stores
    val forward = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO))
    val rob = Flipped(new RobLsqIO)
    val nuke_rollback = Vec(StorePipelineWidth, Output(Valid(new Redirect)))
    val nack_rollback = Output(Valid(new Redirect))
    val release = Flipped(Valid(new Release))
    // val refill = Flipped(Valid(new Refill))
    val tl_d_channel = Input(new DcacheToLduForwardIO)
    val maControl = Flipped(new StoreMaBufToSqControlIO)
    val uncacheOutstanding = Input(Bool())
    val uncache = new UncacheWordIO
    val mmioStout = DecoupledIO(new MemExuOutput) // writeback uncached store
    // TODO: implement vector store
    val vecmmioStout = DecoupledIO(new MemExuOutput(isVector = true)) // writeback uncached vector store
    val sqEmpty = Output(Bool())
    val lq_rep_full = Output(Bool())
    val sqFull = Output(Bool())
    val lqFull = Output(Bool())
    val sqCancelCnt = Output(UInt(log2Up(StoreQueueSize + 1).W))
    val lqCancelCnt = Output(UInt(log2Up(VirtualLoadQueueSize + 1).W))
    val lqDeq = Output(UInt(log2Up(CommitWidth + 1).W))
    val sqDeq = Output(UInt(log2Ceil(EnsbufferWidth + 1).W))
    val lqCanAccept = Output(Bool())
    val sqCanAccept = Output(Bool())
    val lqDeqPtr = Output(new LqPtr)
    val sqDeqPtr = Output(new SqPtr)
    val exceptionAddr = new ExceptionAddrIO
    val flushFrmMaBuf = Input(Bool())
    val issuePtrExt = Output(new SqPtr)
    val l2_hint = Input(Valid(new L2ToL1Hint()))
    val tlb_hint = Flipped(new TlbHintIO)
    val cmoOpReq = DecoupledIO(new CMOReq)
    val cmoOpResp = Flipped(DecoupledIO(new CMOResp))
    val flushSbuffer = new SbufferFlushBundle
    val force_write = Output(Bool())
    val lqEmpty = Output(Bool())

    // top-down
    val debugTopDown = new LoadQueueTopDownIO
  })

  val loadQueue = Module(new LoadQueue)
  val storeQueue = Module(new StoreQueue)

  storeQueue.io.hartId := io.hartId
  storeQueue.io.uncacheOutstanding := io.uncacheOutstanding

  if (backendParams.debugEn) { dontTouch(loadQueue.io.tlbReplayDelayCycleCtrl) }

  // Todo: imm
  val tlbReplayDelayCycleCtrl = WireInit(VecInit(Seq(14.U(ReSelectLen.W), 0.U(ReSelectLen.W), 125.U(ReSelectLen.W), 0.U(ReSelectLen.W))))
  loadQueue.io.tlbReplayDelayCycleCtrl := tlbReplayDelayCycleCtrl

  // io.enq logic
  // LSQ: send out canAccept when both load queue and store queue are ready
  // Dispatch: send instructions to LSQ only when they are ready
  io.enq.canAccept := loadQueue.io.enq.canAccept && storeQueue.io.enq.canAccept
  io.lqCanAccept := loadQueue.io.enq.canAccept
  io.sqCanAccept := storeQueue.io.enq.canAccept
  loadQueue.io.enq.sqCanAccept := storeQueue.io.enq.canAccept
  storeQueue.io.enq.lqCanAccept := loadQueue.io.enq.canAccept
  io.lqDeqPtr := loadQueue.io.lqDeqPtr
  io.sqDeqPtr := storeQueue.io.sqDeqPtr
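  // Note on the enqueue cross-wiring below: needAlloc(i) is a 2-bit mask, bit 0
  // requesting a load queue entry and bit 1 a store queue entry, so e.g.
  // needAlloc(i) === "b10".U allocates only in the store queue. Each queue also
  // records the index allocated by the other queue (sqIdx for loads, lqIdx for
  // stores), which keeps the relative program order of loads and stores
  // recoverable.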
  for (i <- io.enq.req.indices) {
    loadQueue.io.enq.needAlloc(i) := io.enq.needAlloc(i)(0)
    loadQueue.io.enq.req(i).valid := io.enq.needAlloc(i)(0) && io.enq.req(i).valid
    loadQueue.io.enq.req(i).bits := io.enq.req(i).bits
    loadQueue.io.enq.req(i).bits.sqIdx := storeQueue.io.enq.resp(i)

    storeQueue.io.enq.needAlloc(i) := io.enq.needAlloc(i)(1)
    storeQueue.io.enq.req(i).valid := io.enq.needAlloc(i)(1) && io.enq.req(i).valid
    storeQueue.io.enq.req(i).bits := io.enq.req(i).bits
    storeQueue.io.enq.req(i).bits.lqIdx := loadQueue.io.enq.resp(i)

    io.enq.resp(i).lqIdx := loadQueue.io.enq.resp(i)
    io.enq.resp(i).sqIdx := storeQueue.io.enq.resp(i)
  }

  // store queue wiring
  storeQueue.io.brqRedirect <> io.brqRedirect
  storeQueue.io.vecFeedback <> io.stvecFeedback
  storeQueue.io.storeAddrIn <> io.sta.storeAddrIn // from store_s1
  storeQueue.io.storeAddrInRe <> io.sta.storeAddrInRe // from store_s2
  storeQueue.io.storeDataIn <> io.std.storeDataIn // from store_s0
  storeQueue.io.storeMaskIn <> io.sta.storeMaskIn // from store_s0
  storeQueue.io.sbuffer <> io.sbuffer
  storeQueue.io.sbufferVecDifftestInfo <> io.sbufferVecDifftestInfo
  storeQueue.io.mmioStout <> io.mmioStout
  storeQueue.io.vecmmioStout <> io.vecmmioStout
  storeQueue.io.rob <> io.rob
  storeQueue.io.exceptionAddr.isStore := DontCare
  storeQueue.io.sqCancelCnt <> io.sqCancelCnt
  storeQueue.io.sqDeq <> io.sqDeq
  storeQueue.io.sqEmpty <> io.sqEmpty
  storeQueue.io.sqFull <> io.sqFull
  storeQueue.io.forward <> io.forward // overlap forwardMask & forwardData, DO NOT CHANGE SEQUENCE
  storeQueue.io.force_write <> io.force_write
  storeQueue.io.cmoOpReq <> io.cmoOpReq
  storeQueue.io.cmoOpResp <> io.cmoOpResp
  storeQueue.io.flushSbuffer <> io.flushSbuffer
  storeQueue.io.maControl <> io.maControl
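  // Note: loads query the store queue for store-to-load forwarding through
  // io.forward above; see FwdEntry for the timing convention this file appears
  // to assume: validFast is produced in the query cycle, while valid and data
  // follow one cycle later.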
  /* <------- DANGEROUS: Don't change sequence here ! -------> */

  // load queue wiring
  loadQueue.io.redirect <> io.brqRedirect
  loadQueue.io.vecFeedback <> io.ldvecFeedback
  loadQueue.io.ldu <> io.ldu
  loadQueue.io.ldout <> io.ldout
  loadQueue.io.ld_raw_data <> io.ld_raw_data
  loadQueue.io.rob <> io.rob
  loadQueue.io.nuke_rollback <> io.nuke_rollback
  loadQueue.io.nack_rollback <> io.nack_rollback
  loadQueue.io.replay <> io.replay
  // loadQueue.io.refill <> io.refill
  loadQueue.io.tl_d_channel <> io.tl_d_channel
  loadQueue.io.release <> io.release
  loadQueue.io.exceptionAddr.isStore := DontCare
  loadQueue.io.flushFrmMaBuf := io.flushFrmMaBuf
  loadQueue.io.lqCancelCnt <> io.lqCancelCnt
  loadQueue.io.sq.stAddrReadySqPtr <> storeQueue.io.stAddrReadySqPtr
  loadQueue.io.sq.stAddrReadyVec <> storeQueue.io.stAddrReadyVec
  loadQueue.io.sq.stDataReadySqPtr <> storeQueue.io.stDataReadySqPtr
  loadQueue.io.sq.stDataReadyVec <> storeQueue.io.stDataReadyVec
  loadQueue.io.sq.stIssuePtr <> storeQueue.io.stIssuePtr
  loadQueue.io.sq.sqEmpty <> storeQueue.io.sqEmpty
  loadQueue.io.sta.storeAddrIn <> io.sta.storeAddrIn // store_s1
  loadQueue.io.std.storeDataIn <> io.std.storeDataIn // store_s0
  loadQueue.io.lqFull <> io.lqFull
  loadQueue.io.lq_rep_full <> io.lq_rep_full
  loadQueue.io.lqDeq <> io.lqDeq
  loadQueue.io.l2_hint <> io.l2_hint
  loadQueue.io.tlb_hint <> io.tlb_hint
  loadQueue.io.lqEmpty <> io.lqEmpty

  // ROB commits for the LSQ are delayed by two cycles, which in turn delays the
  // deqPtr update in lq/sq:
  // s0: commit
  // s1: exception is found
  // s2: exception is triggered
  // s3: ptr updated & new address ready
  // the address will be used in the cycle after the exception is triggered
  io.exceptionAddr.vaddr := Mux(RegNext(io.exceptionAddr.isStore), storeQueue.io.exceptionAddr.vaddr, loadQueue.io.exceptionAddr.vaddr)
  io.exceptionAddr.vaNeedExt := Mux(RegNext(io.exceptionAddr.isStore), storeQueue.io.exceptionAddr.vaNeedExt, loadQueue.io.exceptionAddr.vaNeedExt)
  io.exceptionAddr.isHyper := Mux(RegNext(io.exceptionAddr.isStore), storeQueue.io.exceptionAddr.isHyper, loadQueue.io.exceptionAddr.isHyper)
  io.exceptionAddr.vstart := Mux(RegNext(io.exceptionAddr.isStore), storeQueue.io.exceptionAddr.vstart, loadQueue.io.exceptionAddr.vstart)
  io.exceptionAddr.vl := Mux(RegNext(io.exceptionAddr.isStore), storeQueue.io.exceptionAddr.vl, loadQueue.io.exceptionAddr.vl)
  io.exceptionAddr.gpaddr := Mux(RegNext(io.exceptionAddr.isStore), storeQueue.io.exceptionAddr.gpaddr, loadQueue.io.exceptionAddr.gpaddr)
  io.exceptionAddr.isForVSnonLeafPTE := Mux(RegNext(io.exceptionAddr.isStore), storeQueue.io.exceptionAddr.isForVSnonLeafPTE, loadQueue.io.exceptionAddr.isForVSnonLeafPTE)
  io.issuePtrExt := storeQueue.io.stAddrReadySqPtr

  // naive uncache arbiter
  val s_idle :: s_load :: s_store :: Nil = Enum(3)
  val pendingstate = RegInit(s_idle)

  switch(pendingstate) {
    is(s_idle) {
      when(io.uncache.req.fire) {
        pendingstate := Mux(loadQueue.io.uncache.req.valid, s_load,
          Mux(io.uncacheOutstanding, s_idle, s_store))
      }
    }
    is(s_load) {
      when(io.uncache.resp.fire) {
        pendingstate := s_idle
      }
    }
    is(s_store) {
      when(io.uncache.resp.fire) {
        pendingstate := s_idle
      }
    }
  }

  loadQueue.io.uncache := DontCare
  storeQueue.io.uncache := DontCare
  loadQueue.io.uncache.req.ready := false.B
  storeQueue.io.uncache.req.ready := false.B
  loadQueue.io.uncache.resp.valid := false.B
  storeQueue.io.uncache.resp.valid := false.B
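  // Note on the routing below: both queues are tied off by default; in s_idle
  // the load queue is given priority on the request port (the asserts further
  // down guarantee that the two queues never issue a request in the same
  // cycle). When uncacheOutstanding is set, responses always go to the load
  // queue, matching the state machine above, where outstanding store requests
  // keep pendingstate in s_idle instead of entering s_store.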
  when(pendingstate === s_idle) {
    when(loadQueue.io.uncache.req.valid) {
      io.uncache.req <> loadQueue.io.uncache.req
    }.otherwise {
      io.uncache.req <> storeQueue.io.uncache.req
    }
  }.otherwise {
    io.uncache.req.valid := false.B
    io.uncache.req.bits := DontCare
  }
  when(io.uncacheOutstanding) {
    io.uncache.resp <> loadQueue.io.uncache.resp
  }.otherwise {
    when(pendingstate === s_load) {
      io.uncache.resp <> loadQueue.io.uncache.resp
    }.otherwise {
      io.uncache.resp <> storeQueue.io.uncache.resp
    }
  }

  loadQueue.io.debugTopDown <> io.debugTopDown

  assert(!(loadQueue.io.uncache.req.valid && storeQueue.io.uncache.req.valid))
  assert(!(loadQueue.io.uncache.resp.valid && storeQueue.io.uncache.resp.valid))
  when(!io.uncacheOutstanding) {
    assert(!((loadQueue.io.uncache.resp.valid || storeQueue.io.uncache.resp.valid) && pendingstate === s_idle))
  }

  val perfEvents = Seq(loadQueue, storeQueue).flatMap(_.getPerfEvents)
  generatePerfEvent()
}

class LsqEnqCtrl(implicit p: Parameters) extends XSModule
  with HasVLSUParameters {
  val io = IO(new Bundle {
    val redirect = Flipped(ValidIO(new Redirect))
    // to dispatch
    val enq = new LsqEnqIO
    // from `memBlock.io.lqDeq`
    val lcommit = Input(UInt(log2Up(CommitWidth + 1).W))
    // from `memBlock.io.sqDeq`
    val scommit = Input(UInt(log2Ceil(EnsbufferWidth + 1).W))
    // from/to lsq
    val lqCancelCnt = Input(UInt(log2Up(VirtualLoadQueueSize + 1).W))
    val sqCancelCnt = Input(UInt(log2Up(StoreQueueSize + 1).W))
    val lqFreeCount = Output(UInt(log2Up(VirtualLoadQueueSize + 1).W))
    val sqFreeCount = Output(UInt(log2Up(StoreQueueSize + 1).W))
    val enqLsq = Flipped(new LsqEnqIO)
  })

  val lqPtr = RegInit(0.U.asTypeOf(new LqPtr))
  val sqPtr = RegInit(0.U.asTypeOf(new SqPtr))
  val lqCounter = RegInit(VirtualLoadQueueSize.U(log2Up(VirtualLoadQueueSize + 1).W))
  val sqCounter = RegInit(StoreQueueSize.U(log2Up(StoreQueueSize + 1).W))
  val canAccept = RegInit(false.B)

  val blockVec = io.enq.iqAccept.map(!_) :+ true.B
  val numLsElem = io.enq.req.map(_.bits.numLsElem)
  val needEnqLoadQueue = VecInit(io.enq.req.map(x => FuType.isLoad(x.bits.fuType) || FuType.isVNonsegLoad(x.bits.fuType)))
  val needEnqStoreQueue = VecInit(io.enq.req.map(x => FuType.isStore(x.bits.fuType) || FuType.isVNonsegStore(x.bits.fuType)))
  val loadQueueElem = needEnqLoadQueue.zip(numLsElem).map(x => Mux(x._1, x._2, 0.U))
  val storeQueueElem = needEnqStoreQueue.zip(numLsElem).map(x => Mux(x._1, x._2, 0.U))
  val loadFlowPopCount = 0.U +: loadQueueElem.zipWithIndex.map { case (l, i) =>
    loadQueueElem.take(i + 1).reduce(_ + _)
  }
  val storeFlowPopCount = 0.U +: storeQueueElem.zipWithIndex.map { case (s, i) =>
    storeQueueElem.take(i + 1).reduce(_ + _)
  }
  val lqAllocNumber = PriorityMux(blockVec.zip(loadFlowPopCount))
  val sqAllocNumber = PriorityMux(blockVec.zip(storeFlowPopCount))
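  // Worked example of the allocation counting above (illustrative values):
  // with three enqueue requests [load(2 elems), store(1 elem), load(1 elem)]
  // and iqAccept = [1, 1, 0], we get blockVec = [0, 0, 1, 1],
  // loadQueueElem = [2, 0, 1], loadFlowPopCount = [0, 2, 2, 3],
  // storeQueueElem = [0, 1, 0] and storeFlowPopCount = [0, 0, 1, 1].
  // PriorityMux picks the popcount at the first blocked slot, giving
  // lqAllocNumber = 2 and sqAllocNumber = 1: only elements of requests the
  // IQ accepted are allocated.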
  io.lqFreeCount := lqCounter
  io.sqFreeCount := sqCounter
  // How to update ptr and counter:
  // (1) by default, update according to enq/commit
  // (2) on a redirect, once the dispatch queue is empty, update according to
  //     the cancel counts reported by the LSQ
  val t1_redirect = RegNext(io.redirect.valid)
  val t2_redirect = RegNext(t1_redirect)
  val t2_update = t2_redirect && !VecInit(io.enq.needAlloc.map(_.orR)).asUInt.orR
  val t3_update = RegNext(t2_update)
  val t3_lqCancelCnt = GatedRegNext(io.lqCancelCnt)
  val t3_sqCancelCnt = GatedRegNext(io.sqCancelCnt)
  when(t3_update) {
    lqPtr := lqPtr - t3_lqCancelCnt
    lqCounter := lqCounter + io.lcommit + t3_lqCancelCnt
    sqPtr := sqPtr - t3_sqCancelCnt
    sqCounter := sqCounter + io.scommit + t3_sqCancelCnt
  }.elsewhen(!io.redirect.valid && io.enq.canAccept) {
    lqPtr := lqPtr + lqAllocNumber
    lqCounter := lqCounter + io.lcommit - lqAllocNumber
    sqPtr := sqPtr + sqAllocNumber
    sqCounter := sqCounter + io.scommit - sqAllocNumber
  }.otherwise {
    lqCounter := lqCounter + io.lcommit
    sqCounter := sqCounter + io.scommit
  }

  // TODO: MaxAllocate and the width of lqOffset/sqOffset need to be discussed
  val lqMaxAllocate = LSQLdEnqWidth
  val sqMaxAllocate = LSQStEnqWidth
  val maxAllocate = lqMaxAllocate max sqMaxAllocate
  val ldCanAccept = lqCounter >= lqAllocNumber +& lqMaxAllocate.U
  val sqCanAccept = sqCounter >= sqAllocNumber +& sqMaxAllocate.U
  // It is possible that t3_update and enq are true in the same clock cycle.
  // For example, if redirect.valid lasts more than one clock cycle, new
  // instructions may enter after the last redirect while the previous redirect
  // has not been resolved (i.e. the pointers have not yet been updated with the
  // cancel counts from the LSQ). To solve this easily, we block enqueue on
  // t3_update, which is RegNext(t2_update).
  io.enq.canAccept := RegNext(ldCanAccept && sqCanAccept && !t2_update)
  val lqOffset = Wire(Vec(io.enq.resp.length, UInt(lqPtr.value.getWidth.W)))
  val sqOffset = Wire(Vec(io.enq.resp.length, UInt(sqPtr.value.getWidth.W)))
  for ((resp, i) <- io.enq.resp.zipWithIndex) {
    lqOffset(i) := loadFlowPopCount(i)
    resp.lqIdx := lqPtr + lqOffset(i)
    sqOffset(i) := storeFlowPopCount(i)
    resp.sqIdx := sqPtr + sqOffset(i)
  }

  io.enqLsq.needAlloc := RegNext(io.enq.needAlloc)
  io.enqLsq.iqAccept := RegNext(io.enq.iqAccept)
  io.enqLsq.req.zip(io.enq.req).zip(io.enq.resp).foreach { case ((toLsq, enq), resp) =>
    val do_enq = enq.valid && !io.redirect.valid && io.enq.canAccept
    toLsq.valid := RegNext(do_enq)
    toLsq.bits := RegEnable(enq.bits, do_enq)
    toLsq.bits.lqIdx := RegEnable(resp.lqIdx, do_enq)
    toLsq.bits.sqIdx := RegEnable(resp.sqIdx, do_enq)
  }
}
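// A minimal wiring sketch of how LsqEnqCtrl sits between dispatch and the LSQ.
// This is illustrative only: `dispatch` and `backendRedirect` are assumed
// names, not part of this file.
//
//   val lsq     = Module(new LsqWrapper)
//   val enqCtrl = Module(new LsqEnqCtrl)
//   enqCtrl.io.redirect <> backendRedirect  // hypothetical redirect source
//   enqCtrl.io.enq <> dispatch.io.toLsq     // hypothetical dispatch-side port
//   lsq.io.enq <> enqCtrl.io.enqLsq         // requests reach the LSQ one cycle later
//   enqCtrl.io.lcommit := lsq.io.lqDeq
//   enqCtrl.io.scommit := lsq.io.sqDeq
//   enqCtrl.io.lqCancelCnt := lsq.io.lqCancelCnt
//   enqCtrl.io.sqCancelCnt := lsq.io.sqCancelCnt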