/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.mmu.TlbRequestIO
import xiangshan.backend.rob.RobLsqIO

class ExceptionAddrIO(implicit p: Parameters) extends XSBundle {
  val isStore = Input(Bool())
  val vaddr = Output(UInt(VAddrBits.W))
}

class FwdEntry extends Bundle {
  val validFast = Bool() // validFast is generated in the same cycle as the query
  val valid = Bool() // valid is generated 1 cycle after the query request
  val data = UInt(8.W) // data is generated 1 cycle after the query request
}

// in-flight miss block requests
class InflightBlockInfo(implicit p: Parameters) extends XSBundle {
  val block_addr = UInt(PAddrBits.W)
  val valid = Bool()
}

class LsqEnqIO(implicit p: Parameters) extends XSBundle {
  val canAccept = Output(Bool())
  val needAlloc = Vec(exuParameters.LsExuCnt, Input(UInt(2.W)))
  val req = Vec(exuParameters.LsExuCnt, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(exuParameters.LsExuCnt, Output(new LSIdx))
}
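// The two-bit needAlloc encoding consumed by LsqWrapper/LsqEnqCtrl below:
//   bit 0 - the instruction needs a load queue entry
//   bit 1 - the instruction needs a store queue entry
// A minimal dispatch-side wiring sketch (hypothetical names, not taken from
// this file; isLoad/isStore stand in for whatever decode provides):
//   lsq.enq.needAlloc(i) := Cat(isStore(uop), isLoad(uop))
//   lsq.enq.req(i).valid := uopValid(i) && lsq.enq.canAccept
//   lsq.enq.req(i).bits  := uop(i)
//   // the allocated indices come back on lsq.enq.resp(i).lqIdx / .sqIdx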
// Load / Store Queue Wrapper for the XiangShan out-of-order LSU
class LsqWrapper(implicit p: Parameters) extends XSModule with HasDCacheParameters with HasPerfEvents {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(8.W))
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val enq = new LsqEnqIO
    val ldu = new Bundle() {
      val stld_nuke_query = Vec(LoadPipelineWidth, Flipped(new LoadNukeQueryIO)) // from load_s2
      val ldld_nuke_query = Vec(LoadPipelineWidth, Flipped(new LoadNukeQueryIO)) // from load_s2
      val ldin = Vec(LoadPipelineWidth, Flipped(Decoupled(new LqWriteBundle))) // from load_s3
    }
    val sta = new Bundle() {
      val storeMaskIn = Vec(StorePipelineWidth, Flipped(Valid(new StoreMaskBundle))) // from store_s0: store mask, sent to sq from rs
      val storeAddrIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // from store_s1
      val storeAddrInRe = Vec(StorePipelineWidth, Input(new LsPipelineBundle())) // from store_s2
    }
    val std = new Bundle() {
      val storeDataIn = Vec(StorePipelineWidth, Flipped(Valid(new ExuOutput))) // from store_s0: store data, sent to sq from rs
    }
    val ldout = Vec(LoadPipelineWidth, DecoupledIO(new ExuOutput))
    val ld_raw_data = Vec(LoadPipelineWidth, Output(new LoadDataFromLQBundle))
    val replay = Vec(LoadPipelineWidth, Decoupled(new LsPipelineBundle))
    val sbuffer = Vec(EnsbufferWidth, Decoupled(new DCacheWordReqWithVaddr))
    val forward = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO))
    val rob = Flipped(new RobLsqIO)
    val rollback = Output(Valid(new Redirect))
    val release = Flipped(Valid(new Release))
    val refill = Flipped(Valid(new Refill))
    val uncacheOutstanding = Input(Bool())
    val uncache = new UncacheWordIO
    val mmioStout = DecoupledIO(new ExuOutput) // writeback uncached store
    val sqEmpty = Output(Bool())
    val lq_rep_full = Output(Bool())
    val sqFull = Output(Bool())
    val lqFull = Output(Bool())
    val sqCancelCnt = Output(UInt(log2Up(StoreQueueSize + 1).W))
    val lqCancelCnt = Output(UInt(log2Up(VirtualLoadQueueSize + 1).W))
    val lqDeq = Output(UInt(log2Up(CommitWidth + 1).W))
    val sqDeq = Output(UInt(log2Ceil(EnsbufferWidth + 1).W))
    val lqCanAccept = Output(Bool())
    val sqCanAccept = Output(Bool())
    val exceptionAddr = new ExceptionAddrIO
    val trigger = Vec(LoadPipelineWidth, new LqTriggerIO)
    val issuePtrExt = Output(new SqPtr)
    val l2_hint = Input(Valid(new L2ToL1Hint()))
  })

  val loadQueue = Module(new LoadQueue)
  val storeQueue = Module(new StoreQueue)

  storeQueue.io.hartId := io.hartId
  storeQueue.io.uncacheOutstanding := io.uncacheOutstanding

  val tlbReplayDelayCycleCtrl = WireInit(VecInit(Seq(14.U(ReSelectLen.W), 0.U(ReSelectLen.W), 125.U(ReSelectLen.W), 0.U(ReSelectLen.W))))
  loadQueue.io.tlbReplayDelayCycleCtrl := tlbReplayDelayCycleCtrl
  dontTouch(loadQueue.io.tlbReplayDelayCycleCtrl)

  // io.enq logic
  // LSQ: assert canAccept only when both the load queue and the store queue can accept
  // Dispatch: send instructions to the LSQ only when it is ready
  io.enq.canAccept := loadQueue.io.enq.canAccept && storeQueue.io.enq.canAccept
  io.lqCanAccept := loadQueue.io.enq.canAccept
  io.sqCanAccept := storeQueue.io.enq.canAccept
  loadQueue.io.enq.sqCanAccept := storeQueue.io.enq.canAccept
  storeQueue.io.enq.lqCanAccept := loadQueue.io.enq.canAccept
  for (i <- io.enq.req.indices) {
    loadQueue.io.enq.needAlloc(i) := io.enq.needAlloc(i)(0)
    loadQueue.io.enq.req(i).valid := io.enq.needAlloc(i)(0) && io.enq.req(i).valid
    loadQueue.io.enq.req(i).bits := io.enq.req(i).bits
    loadQueue.io.enq.req(i).bits.sqIdx := storeQueue.io.enq.resp(i)

    storeQueue.io.enq.needAlloc(i) := io.enq.needAlloc(i)(1)
    storeQueue.io.enq.req(i).valid := io.enq.needAlloc(i)(1) && io.enq.req(i).valid
    storeQueue.io.enq.req(i).bits := io.enq.req(i).bits
    storeQueue.io.enq.req(i).bits.lqIdx := loadQueue.io.enq.resp(i)

    io.enq.resp(i).lqIdx := loadQueue.io.enq.resp(i)
    io.enq.resp(i).sqIdx := storeQueue.io.enq.resp(i)
  }

  // store queue wiring
  storeQueue.io.brqRedirect <> io.brqRedirect
  storeQueue.io.storeAddrIn <> io.sta.storeAddrIn // from store_s1
  storeQueue.io.storeAddrInRe <> io.sta.storeAddrInRe // from store_s2
  storeQueue.io.storeDataIn <> io.std.storeDataIn // from store_s0
  storeQueue.io.storeMaskIn <> io.sta.storeMaskIn // from store_s0
  storeQueue.io.sbuffer <> io.sbuffer
  storeQueue.io.mmioStout <> io.mmioStout
  storeQueue.io.rob <> io.rob
  storeQueue.io.exceptionAddr.isStore := DontCare
  storeQueue.io.sqCancelCnt <> io.sqCancelCnt
  storeQueue.io.sqDeq <> io.sqDeq
  storeQueue.io.sqEmpty <> io.sqEmpty
  storeQueue.io.sqFull <> io.sqFull
  storeQueue.io.forward <> io.forward // overlaps forwardMask & forwardData, DO NOT CHANGE SEQUENCE

  /* <------- DANGEROUS: Don't change sequence here ! -------> */
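  // A note on the warning above (explanatory only): `forward` is wired with a
  // bulk `<>` connect, and in Chisel the last connection to a given leaf
  // signal wins. If other connections touch the overlapping
  // forwardMask/forwardData fields, reordering this wiring can silently change
  // which driver wins. Illustrative sketch of the hazard (not from this file):
  //   a.x := b.x // earlier connection
  //   a.x := c.x // last connection wins; swapping the two lines changes a.x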
  // load queue wiring
  loadQueue.io.redirect <> io.brqRedirect
  loadQueue.io.ldu <> io.ldu
  loadQueue.io.ldout <> io.ldout
  loadQueue.io.ld_raw_data <> io.ld_raw_data
  loadQueue.io.rob <> io.rob
  loadQueue.io.rollback <> io.rollback
  loadQueue.io.replay <> io.replay
  loadQueue.io.refill <> io.refill
  loadQueue.io.release <> io.release
  loadQueue.io.trigger <> io.trigger
  loadQueue.io.exceptionAddr.isStore := DontCare
  loadQueue.io.lqCancelCnt <> io.lqCancelCnt
  loadQueue.io.sq.stAddrReadySqPtr <> storeQueue.io.stAddrReadySqPtr
  loadQueue.io.sq.stAddrReadyVec <> storeQueue.io.stAddrReadyVec
  loadQueue.io.sq.stDataReadySqPtr <> storeQueue.io.stDataReadySqPtr
  loadQueue.io.sq.stDataReadyVec <> storeQueue.io.stDataReadyVec
  loadQueue.io.sq.stIssuePtr <> storeQueue.io.stIssuePtr
  loadQueue.io.sq.sqEmpty <> storeQueue.io.sqEmpty
  loadQueue.io.sta.storeAddrIn <> io.sta.storeAddrIn // from store_s1
  loadQueue.io.std.storeDataIn <> io.std.storeDataIn // from store_s0
  loadQueue.io.lqFull <> io.lqFull
  loadQueue.io.lq_rep_full <> io.lq_rep_full
  loadQueue.io.lqDeq <> io.lqDeq
  loadQueue.io.l2_hint <> io.l2_hint

  // ROB commits for the LSQ are delayed by two cycles, which in turn delays
  // the deqPtr update in lq/sq:
  // s0: commit
  // s1: exception found
  // s2: exception triggered
  // s3: ptr updated & new address ready
  // the address is used in the cycle after the exception is triggered
  io.exceptionAddr.vaddr := Mux(RegNext(io.exceptionAddr.isStore), storeQueue.io.exceptionAddr.vaddr, loadQueue.io.exceptionAddr.vaddr)
  io.issuePtrExt := storeQueue.io.stAddrReadySqPtr

  // naive uncache arbiter
  val s_idle :: s_load :: s_store :: Nil = Enum(3)
  val pendingstate = RegInit(s_idle)

  switch(pendingstate){
    is(s_idle){
      when(io.uncache.req.fire() && !io.uncacheOutstanding){
        // !io.uncacheOutstanding already holds here, so the accepted request
        // is either a pending load or a pending store
        pendingstate := Mux(loadQueue.io.uncache.req.valid, s_load, s_store)
      }
    }
    is(s_load){
      when(io.uncache.resp.fire()){
        pendingstate := s_idle
      }
    }
    is(s_store){
      when(io.uncache.resp.fire()){
        pendingstate := s_idle
      }
    }
  }

  loadQueue.io.uncache := DontCare
  storeQueue.io.uncache := DontCare
  loadQueue.io.uncache.resp.valid := false.B
  storeQueue.io.uncache.resp.valid := false.B
  when(loadQueue.io.uncache.req.valid){
    io.uncache.req <> loadQueue.io.uncache.req
  }.otherwise{
    io.uncache.req <> storeQueue.io.uncache.req
  }
  when (io.uncacheOutstanding) {
    io.uncache.resp <> loadQueue.io.uncache.resp
  } .otherwise {
    when(pendingstate === s_load){
      io.uncache.resp <> loadQueue.io.uncache.resp
    }.otherwise{
      io.uncache.resp <> storeQueue.io.uncache.resp
    }
  }

  assert(!(loadQueue.io.uncache.req.valid && storeQueue.io.uncache.req.valid))
  assert(!(loadQueue.io.uncache.resp.valid && storeQueue.io.uncache.resp.valid))
  when (!io.uncacheOutstanding) {
    assert(!((loadQueue.io.uncache.resp.valid || storeQueue.io.uncache.resp.valid) && pendingstate === s_idle))
  }

  val perfEvents = Seq(loadQueue, storeQueue).flatMap(_.getPerfEvents)
  generatePerfEvent()
}
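// A minimal, self-contained sketch of the serializing pattern implemented by
// the naive uncache arbiter above: grant one requester at a time with fixed
// priority (loads first, matching the when/otherwise in LsqWrapper), remember
// the owner while a request is in flight, and steer the single response
// channel back to that owner. This module is illustrative only; it is not
// part of the original design and is never instantiated.
class UncacheArbiterSketch extends Module {
  val io = IO(new Bundle {
    val in = Vec(2, Flipped(Decoupled(UInt(8.W)))) // port 0: load, port 1: store
    val out = Decoupled(UInt(8.W)) // single downstream request port
    val resp = Flipped(Valid(UInt(8.W))) // single downstream response
    val respOut = Vec(2, Valid(UInt(8.W))) // response steered back to the owner
  })
  val busy = RegInit(false.B)
  val owner = RegInit(0.U(1.W))

  // fixed priority: port 0 wins when both ports request in the same cycle
  val sel = Mux(io.in(0).valid, 0.U, 1.U)
  io.out.valid := !busy && (io.in(0).valid || io.in(1).valid)
  io.out.bits := io.in(sel).bits
  io.in(0).ready := !busy && io.out.ready
  io.in(1).ready := !busy && io.out.ready && !io.in(0).valid
  when (io.out.fire()) { busy := true.B; owner := sel }
  when (io.resp.valid) { busy := false.B }

  // route the response to whichever port issued the in-flight request
  for (i <- 0 until 2) {
    io.respOut(i).valid := io.resp.valid && busy && owner === i.U
    io.respOut(i).bits := io.resp.bits
  }
}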
class LsqEnqCtrl(implicit p: Parameters) extends XSModule {
  val io = IO(new Bundle {
    val redirect = Flipped(ValidIO(new Redirect))
    // to dispatch
    val enq = new LsqEnqIO
    // from `memBlock.io.lqDeq`
    val lcommit = Input(UInt(log2Up(CommitWidth + 1).W))
    // from `memBlock.io.sqDeq`
    val scommit = Input(UInt(log2Ceil(EnsbufferWidth + 1).W))
    // from/to lsq
    val lqCancelCnt = Input(UInt(log2Up(VirtualLoadQueueSize + 1).W))
    val sqCancelCnt = Input(UInt(log2Up(StoreQueueSize + 1).W))
    val enqLsq = Flipped(new LsqEnqIO)
  })

  val lqPtr = RegInit(0.U.asTypeOf(new LqPtr))
  val sqPtr = RegInit(0.U.asTypeOf(new SqPtr))
  val lqCounter = RegInit(VirtualLoadQueueSize.U(log2Up(VirtualLoadQueueSize + 1).W))
  val sqCounter = RegInit(StoreQueueSize.U(log2Up(StoreQueueSize + 1).W))

  val loadEnqNumber = PopCount(io.enq.req.zip(io.enq.needAlloc).map(x => x._1.valid && x._2(0)))
  val storeEnqNumber = PopCount(io.enq.req.zip(io.enq.needAlloc).map(x => x._1.valid && x._2(1)))

  // How to update the ptrs and counters:
  // (1) by default, update according to enq/commit
  // (2) on redirect, once the dispatch queue is empty, update according to the
  //     cancel counts reported by the LSQ
  val t1_redirect = RegNext(io.redirect.valid)
  val t2_redirect = RegNext(t1_redirect)
  val t2_update = t2_redirect && !VecInit(io.enq.needAlloc.map(_.orR)).asUInt.orR
  val t3_update = RegNext(t2_update)
  val t3_lqCancelCnt = RegNext(io.lqCancelCnt)
  val t3_sqCancelCnt = RegNext(io.sqCancelCnt)
  when (t3_update) {
    lqPtr := lqPtr - t3_lqCancelCnt
    lqCounter := lqCounter + io.lcommit + t3_lqCancelCnt
    sqPtr := sqPtr - t3_sqCancelCnt
    sqCounter := sqCounter + io.scommit + t3_sqCancelCnt
  }.elsewhen (!io.redirect.valid && io.enq.canAccept) {
    lqPtr := lqPtr + loadEnqNumber
    lqCounter := lqCounter + io.lcommit - loadEnqNumber
    sqPtr := sqPtr + storeEnqNumber
    sqCounter := sqCounter + io.scommit - storeEnqNumber
  }.otherwise {
    lqCounter := lqCounter + io.lcommit
    sqCounter := sqCounter + io.scommit
  }

  val maxAllocate = Seq(exuParameters.LduCnt, exuParameters.StuCnt).max
  val ldCanAccept = lqCounter >= loadEnqNumber +& maxAllocate.U
  val sqCanAccept = sqCounter >= storeEnqNumber +& maxAllocate.U
  // t3_update and enq can be true in the same clock cycle. For example, if
  // redirect.valid is asserted for more than one cycle, new instructions may
  // enter after the last redirect while the previous redirect has not yet been
  // resolved (i.e. the cancel counts from the LSQ have not been applied yet).
  // To handle this simply, we block enqueue whenever t3_update, which is
  // RegNext(t2_update), is set.
  io.enq.canAccept := RegNext(ldCanAccept && sqCanAccept && !t2_update)
  val lqOffset = Wire(Vec(io.enq.resp.length, UInt(log2Up(maxAllocate + 1).W)))
  val sqOffset = Wire(Vec(io.enq.resp.length, UInt(log2Up(maxAllocate + 1).W)))
  for ((resp, i) <- io.enq.resp.zipWithIndex) {
    lqOffset(i) := PopCount(io.enq.needAlloc.take(i).map(a => a(0)))
    resp.lqIdx := lqPtr + lqOffset(i)
    sqOffset(i) := PopCount(io.enq.needAlloc.take(i).map(a => a(1)))
    resp.sqIdx := sqPtr + sqOffset(i)
  }

  io.enqLsq.needAlloc := RegNext(io.enq.needAlloc)
  io.enqLsq.req.zip(io.enq.req).zip(io.enq.resp).foreach{ case ((toLsq, enq), resp) =>
    val do_enq = enq.valid && !io.redirect.valid && io.enq.canAccept
    toLsq.valid := RegNext(do_enq)
    toLsq.bits := RegEnable(enq.bits, do_enq)
    toLsq.bits.lqIdx := RegEnable(resp.lqIdx, do_enq)
    toLsq.bits.sqIdx := RegEnable(resp.sqIdx, do_enq)
  }
}
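// A plain-Scala software model (illustrative only, not part of the design) of
// the free-entry accounting implemented in LsqEnqCtrl above; names are
// hypothetical. Useful for convincing yourself the two update paths keep the
// counter consistent:
object LsqCounterModel {
  // normal path: free entries are gained by commit and consumed by enqueue
  def stepEnq(free: Int, commit: Int, enq: Int): Int = free + commit - enq
  // post-redirect fixup path: squashed (cancelled) entries become free again
  def stepRedirect(free: Int, commit: Int, cancelled: Int): Int = free + commit + cancelled
}
// Example with an 80-entry load queue: start with free = 80; enqueue 2 loads
// (free = stepEnq(80, 0, 2) = 78); later a redirect squashes both, and the
// fixup path restores free = stepRedirect(78, 0, 2) = 80, while lqPtr steps
// back by the same 2.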