/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import xiangshan._
import xiangshan.backend.fu.fpu.FPU
import xiangshan.backend.rob.RobLsqIO
import xiangshan.cache._
import xiangshan.frontend.FtqPtr
import xiangshan.ExceptionNO._

class LqPtr(implicit p: Parameters) extends CircularQueuePtr[LqPtr](
  p => p(XSCoreParamsKey).LoadQueueSize
){
}

object LqPtr {
  def apply(f: Bool, v: UInt)(implicit p: Parameters): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}

trait HasLoadHelper { this: XSModule =>
  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    val fpWen = uop.ctrl.fpWen
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lb  -> SignExt(rdata(7, 0) , XLEN),
      LSUOpType.lh  -> SignExt(rdata(15, 0), XLEN),
      /*
        riscv-spec-20191213: 12.2 NaN Boxing of Narrower Values
        Any operation that writes a narrower result to an f register must write
        all 1s to the uppermost FLEN−n bits to yield a legal NaN-boxed value.
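        For example (illustrative): on RV64D (FLEN = 64), an flw that loads
        0x3F800000 (1.0f) must write 0xFFFFFFFF_3F800000 to the f register;
        FPU.box(rdata, FPU.S) below performs this widening by filling the
        upper 32 bits with all 1s.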
      */
      LSUOpType.lw  -> Mux(fpWen, FPU.box(rdata, FPU.S), SignExt(rdata(31, 0), XLEN)),
      LSUOpType.ld  -> Mux(fpWen, FPU.box(rdata, FPU.D), SignExt(rdata(63, 0), XLEN)),
      LSUOpType.lbu -> ZeroExt(rdata(7, 0) , XLEN),
      LSUOpType.lhu -> ZeroExt(rdata(15, 0), XLEN),
      LSUOpType.lwu -> ZeroExt(rdata(31, 0), XLEN),
    ))
  }
}

class LqEnqIO(implicit p: Parameters) extends XSBundle {
  val canAccept = Output(Bool())
  val sqCanAccept = Input(Bool())
  val needAlloc = Vec(exuParameters.LsExuCnt, Input(Bool()))
  val req = Vec(exuParameters.LsExuCnt, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(exuParameters.LsExuCnt, Output(new LqPtr))
}

class LqPaddrWriteBundle(implicit p: Parameters) extends XSBundle {
  val paddr = Output(UInt(PAddrBits.W))
  val lqIdx = Output(new LqPtr)
}

class LqTriggerIO(implicit p: Parameters) extends XSBundle {
  val hitLoadAddrTriggerHitVec = Input(Vec(3, Bool()))
  val lqLoadAddrTriggerHitVec = Output(Vec(3, Bool()))
}

// Load Queue
class LoadQueue(implicit p: Parameters) extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasPerfEvents
{
  val io = IO(new Bundle() {
    val enq = new LqEnqIO
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val loadPaddrIn = Vec(LoadPipelineWidth, Flipped(Valid(new LqPaddrWriteBundle)))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LqWriteBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val s2_load_data_forwarded = Vec(LoadPipelineWidth, Input(Bool()))
    val s3_delayed_load_error = Vec(LoadPipelineWidth, Input(Bool()))
    val s2_dcache_require_replay = Vec(LoadPipelineWidth, Input(Bool()))
    val s3_replay_from_fetch = Vec(LoadPipelineWidth, Input(Bool()))
    val ldout = Vec(LoadPipelineWidth, DecoupledIO(new ExuOutput)) // writeback int load
    val load_s1 = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO)) // TODO: to be renamed
    val loadViolationQuery = Vec(LoadPipelineWidth, Flipped(new LoadViolationQueryIO))
    val rob = Flipped(new RobLsqIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val refill = Flipped(ValidIO(new Refill))
    val release = Flipped(ValidIO(new Release))
    val uncache = new UncacheWordIO
    val exceptionAddr = new ExceptionAddrIO
    val lqFull = Output(Bool())
    val lqCancelCnt = Output(UInt(log2Up(LoadQueueSize + 1).W))
    val trigger = Vec(LoadPipelineWidth, new LqTriggerIO)
  })

  println("LoadQueue: size: " + LoadQueueSize)

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRobEntry))
  val dataModule = Module(new LoadQueueDataWrapper(LoadQueueSize, wbNumRead = LoadPipelineWidth, wbNumWrite = LoadPipelineWidth))
  dataModule.io := DontCare
  val vaddrModule = Module(new SyncDataModuleTemplate(UInt(VAddrBits.W), LoadQueueSize, numRead = LoadPipelineWidth + 1, numWrite = LoadPipelineWidth))
  vaddrModule.io := DontCare
  val vaddrTriggerResultModule = Module(new SyncDataModuleTemplate(Vec(3, Bool()), LoadQueueSize, numRead = LoadPipelineWidth, numWrite = LoadPipelineWidth))
  vaddrTriggerResultModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been written back to CDB
  val released = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // load data has been released by dcache
  val error = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // load data has been corrupted
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  // val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the end of rob
  val refilling = WireInit(VecInit(List.fill(LoadQueueSize)(false.B))) // entry's data is being refilled by dcache this cycle
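
  // Entry lifecycle (sketch, summarizing the flag updates below):
  //   dispatch:         allocated := 1, all other flags cleared
  //   load writeback:   hit (non-mmio) -> datavalid := 1, writebacked := 1
  //                     dcache miss    -> miss := 1, wait for refill
  //                     mmio           -> pending := 1, handled at rob head
  //   refill / uncache resp: datavalid := 1, miss := 0
  //   ldout fire:       writebacked := 1
  //   rob commit:       allocated := 0, deqPtr moves past the entry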

  val debug_mmio = Reg(Vec(LoadQueueSize, Bool())) // debug: inst is an mmio inst
  val debug_paddr = Reg(Vec(LoadQueueSize, UInt(PAddrBits.W))) // debug: paddr of the inst

  val enqPtrExt = RegInit(VecInit((0 until io.enq.req.length).map(_.U.asTypeOf(new LqPtr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val deqPtrExtNext = Wire(new LqPtr)

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt.value

  val validCount = distanceBetween(enqPtrExt(0), deqPtrExt)
  val allowEnqueue = validCount <= (LoadQueueSize - LoadPipelineWidth).U

  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)

  val commitCount = RegNext(io.rob.lcommit)

  val release1cycle = io.release
  val release2cycle = RegNext(io.release)
  val release2cycle_dup_lsu = RegNext(io.release)

  /**
    * Enqueue at dispatch
    *
    * Currently, LoadQueue only allows enqueue when #emptyEntries > EnqWidth
    */
  io.enq.canAccept := allowEnqueue

  val canEnqueue = io.enq.req.map(_.valid)
  val enqCancel = io.enq.req.map(_.bits.robIdx.needFlush(io.brqRedirect))
  for (i <- 0 until io.enq.req.length) {
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = io.enq.req(i).bits.lqIdx.value
    when (canEnqueue(i) && !enqCancel(i)) {
      uop(index).robIdx := io.enq.req(i).bits.robIdx
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      released(index) := false.B
      miss(index) := false.B
      pending(index) := false.B
      error(index) := false.B
      XSError(!io.enq.canAccept || !io.enq.sqCanAccept, s"must accept $i\n")
      XSError(index =/= lqIdx.value, s"must be the same entry $i\n")
    }
    io.enq.resp(i) := lqIdx
  }
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")

  /**
    * Writeback load from load units
    *
    * Most load instructions write back to regfile at the same time.
    * However,
    *   (1) For an mmio instruction with exceptions, it writes back to ROB immediately.
    *   (2) For an mmio instruction without exceptions, it does not write back.
    *       The mmio instruction will be sent to lower level when it reaches ROB's head.
    *       After uncache response, it will write back through arbiter with loadUnit.
    *   (3) For cache misses, it is marked as miss and sent to dcache later.
    *       After cache refills, it will write back through arbiter with loadUnit.
    */
  for (i <- 0 until LoadPipelineWidth) {
    dataModule.io.wb.wen(i) := false.B
    dataModule.io.paddr.wen(i) := false.B
    vaddrTriggerResultModule.io.wen(i) := false.B
    val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value

    // most lq status needs to be updated immediately after load writeback to lq
    // flag bits in lq need to be updated accurately
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }
      if(EnableFastForward){
        datavalid(loadWbIndex) := (!io.loadIn(i).bits.miss || io.s2_load_data_forwarded(i)) &&
          !io.loadIn(i).bits.mmio && // mmio data is not valid until we finished uncache access
          !io.s2_dcache_require_replay(i) // do not writeback if that inst will be resent from rs
      } else {
        datavalid(loadWbIndex) := (!io.loadIn(i).bits.miss || io.s2_load_data_forwarded(i)) &&
          !io.loadIn(i).bits.mmio // mmio data is not valid until we finished uncache access
      }
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio

      debug_mmio(loadWbIndex) := io.loadIn(i).bits.mmio
      debug_paddr(loadWbIndex) := io.loadIn(i).bits.paddr

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      if(EnableFastForward){
        miss(loadWbIndex) := dcacheMissed && !io.s2_load_data_forwarded(i) && !io.s2_dcache_require_replay(i)
      } else {
        miss(loadWbIndex) := dcacheMissed && !io.s2_load_data_forwarded(i)
      }
      pending(loadWbIndex) := io.loadIn(i).bits.mmio
      released(loadWbIndex) := release2cycle.valid &&
        io.loadIn(i).bits.paddr(PAddrBits-1, DCacheLineOffset) === release2cycle.bits.paddr(PAddrBits-1, DCacheLineOffset) ||
        release1cycle.valid &&
        io.loadIn(i).bits.paddr(PAddrBits-1, DCacheLineOffset) === release1cycle.bits.paddr(PAddrBits-1, DCacheLineOffset)
    }

    // data bit in lq can be updated when load_s2 valid
    // when(io.loadIn(i).bits.lq_data_wen){
    //   val loadWbData = Wire(new LQDataEntry)
    //   loadWbData.paddr := io.loadIn(i).bits.paddr
    //   loadWbData.mask := io.loadIn(i).bits.mask
    //   loadWbData.data := io.loadIn(i).bits.forwardData.asUInt // fwd data
    //   loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
    //   dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
    //   dataModule.io.wb.wen(i) := true.B

    //   // dirty code for load instr
    //   uop(loadWbIndex).pdest := io.loadIn(i).bits.uop.pdest
    //   uop(loadWbIndex).cf := io.loadIn(i).bits.uop.cf
    //   uop(loadWbIndex).ctrl := io.loadIn(i).bits.uop.ctrl
    //   uop(loadWbIndex).debugInfo := io.loadIn(i).bits.uop.debugInfo

    //   vaddrTriggerResultModule.io.waddr(i) := loadWbIndex
    //   vaddrTriggerResultModule.io.wdata(i) := io.trigger(i).hitLoadAddrTriggerHitVec

    //   vaddrTriggerResultModule.io.wen(i) := true.B
    // }

    // dirty code to reduce load_s2.valid fanout
    when(io.loadIn(i).bits.lq_data_wen_dup(0)){
      val loadWbData = Wire(new LQDataEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.forwardData.asUInt // fwd data
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb.wen(i) := true.B
    }
    // dirty code for load instr
    when(io.loadIn(i).bits.lq_data_wen_dup(1)){
      uop(loadWbIndex).pdest := io.loadIn(i).bits.uop.pdest
    }
    when(io.loadIn(i).bits.lq_data_wen_dup(2)){
      uop(loadWbIndex).cf := io.loadIn(i).bits.uop.cf
    }
    when(io.loadIn(i).bits.lq_data_wen_dup(3)){
      uop(loadWbIndex).ctrl := io.loadIn(i).bits.uop.ctrl
    }
    when(io.loadIn(i).bits.lq_data_wen_dup(4)){
      uop(loadWbIndex).debugInfo := io.loadIn(i).bits.uop.debugInfo
    }
    when(io.loadIn(i).bits.lq_data_wen_dup(5)){
      vaddrTriggerResultModule.io.waddr(i) := loadWbIndex
      vaddrTriggerResultModule.io.wdata(i) := io.trigger(i).hitLoadAddrTriggerHitVec
      vaddrTriggerResultModule.io.wen(i) := true.B
    }

    when(io.loadPaddrIn(i).valid) {
      dataModule.io.paddr.wen(i) := true.B
      dataModule.io.paddr.waddr(i) := io.loadPaddrIn(i).bits.lqIdx.value
      dataModule.io.paddr.wdata(i) := io.loadPaddrIn(i).bits.paddr
    }

    // vaddrModule write is delayed, as vaddrModule will not be read right after write
    vaddrModule.io.waddr(i) := RegNext(loadWbIndex)
    vaddrModule.io.wdata(i) := RegNext(io.loadIn(i).bits.vaddr)
    vaddrModule.io.wen(i) := RegNext(io.loadIn(i).fire())
  }

  when(io.refill.valid) {
    XSDebug("miss resp: paddr:0x%x data %x\n", io.refill.bits.addr, io.refill.bits.data)
  }

  // Refill 64 bit in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.valid := io.refill.valid
  dataModule.io.refill.paddr := io.refill.bits.addr
  dataModule.io.refill.data := io.refill.bits.data

  val s2_dcache_require_replay = WireInit(VecInit((0 until LoadPipelineWidth).map(i =>{
    RegNext(io.loadIn(i).fire()) && RegNext(io.s2_dcache_require_replay(i))
  })))
  dontTouch(s2_dcache_require_replay)

  (0 until LoadQueueSize).map(i => {
    dataModule.io.refill.refillMask(i) := allocated(i) && miss(i)
    when(dataModule.io.refill.valid && dataModule.io.refill.refillMask(i) && dataModule.io.refill.matchMask(i)) {
      datavalid(i) := true.B
      miss(i) := false.B
      when(!s2_dcache_require_replay.asUInt.orR){
        refilling(i) := true.B
      }
      when(io.refill.bits.error) {
        error(i) := true.B
      }
    }
  })
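
  // Illustrative refill wakeup (sketch): suppose entries 3 and 9 both missed
  // on the same cacheline (refillMask bits 3 and 9 set). When the refill for
  // that line returns, matchMask selects both entries in the same cycle: each
  // sets datavalid, clears miss, and (absent a pending replay-from-RS) pulses
  // refilling so it can enter writeback selection.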

  for (i <- 0 until LoadPipelineWidth) {
    val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
    val lastCycleLoadWbIndex = RegNext(loadWbIndex)
    // update miss state in load s3
    if(!EnableFastForward){
      // s2_dcache_require_replay will be used to update lq flag 1 cycle after for better timing
      //
      // io.s2_dcache_require_replay comes from dcache miss req reject, which is quite slow to generate
      when(s2_dcache_require_replay(i)) {
        // do not writeback if that inst will be resent from rs
        // rob writeback will not be triggered by a refill before inst replay
        miss(lastCycleLoadWbIndex) := false.B // disable refill listening
        datavalid(lastCycleLoadWbIndex) := false.B // disable refill listening
        assert(!datavalid(lastCycleLoadWbIndex))
      }
    }
    // update load error state in load s3
    when(RegNext(io.loadIn(i).fire()) && io.s3_delayed_load_error(i)){
      uop(lastCycleLoadWbIndex).cf.exceptionVec(loadAccessFault) := true.B
    }
    // update inst replay from fetch flag in s3
    when(RegNext(io.loadIn(i).fire()) && io.s3_replay_from_fetch(i)){
      uop(lastCycleLoadWbIndex).ctrl.replayInst := true.B
    }
  }


  // Writeback up to 2 missed load insts to CDB
  //
  // Pick 2 missed loads (data refilled), write them back to cdb
  // 2 refilled loads will be selected from even/odd entries, separately

  // Stage 0
  // Generate writeback indexes

  def getRemBits(input: UInt)(rem: Int): UInt = {
    VecInit((0 until LoadQueueSize / LoadPipelineWidth).map(i => { input(LoadPipelineWidth * i + rem) })).asUInt
  }
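
  // Worked example (illustrative): with LoadPipelineWidth = 2 and
  // input = b10110100 (bit i set <=> lq entry i is a candidate),
  //   getRemBits(input)(0) = input bits {0,2,4,6} = b0110 // even entries
  //   getRemBits(input)(1) = input bits {1,3,5,7} = b1100 // odd entries
  // so each writeback port only ever arbitrates over its own parity class.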

  val loadWbSel = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W))) // index selected last cycle
  val loadWbSelV = Wire(Vec(LoadPipelineWidth, Bool())) // index selected in last cycle is valid

  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    // allocated(i) && !writebacked(i) && (datavalid(i) || refilling(i))
    allocated(i) && !writebacked(i) && datavalid(i) // query refilling will cause bad timing
  })).asUInt() // use uint instead of vec to reduce verilog lines
  val remDeqMask = Seq.tabulate(LoadPipelineWidth)(getRemBits(deqMask)(_))
  // generate lastCycleSelect mask
  val remFireMask = Seq.tabulate(LoadPipelineWidth)(rem => getRemBits(UIntToOH(loadWbSel(rem)))(rem))
  // generate real select vec
  def toVec(a: UInt): Vec[Bool] = {
    VecInit(a.asBools)
  }
  val loadRemSelVecFire = Seq.tabulate(LoadPipelineWidth)(rem => getRemBits(loadWbSelVec)(rem) & ~remFireMask(rem))
  val loadRemSelVecNotFire = Seq.tabulate(LoadPipelineWidth)(getRemBits(loadWbSelVec)(_))
  val loadRemSel = Seq.tabulate(LoadPipelineWidth)(rem => Mux(
    io.ldout(rem).fire(),
    getFirstOne(toVec(loadRemSelVecFire(rem)), remDeqMask(rem)),
    getFirstOne(toVec(loadRemSelVecNotFire(rem)), remDeqMask(rem))
  ))


  val loadWbSelGen = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelVGen = Wire(Vec(LoadPipelineWidth, Bool()))
  (0 until LoadPipelineWidth).foreach(index => {
    loadWbSelGen(index) := (
      if (LoadPipelineWidth > 1) Cat(loadRemSel(index), index.U(log2Ceil(LoadPipelineWidth).W))
      else loadRemSel(index)
    )
    loadWbSelVGen(index) := Mux(io.ldout(index).fire, loadRemSelVecFire(index).asUInt.orR, loadRemSelVecNotFire(index).asUInt.orR)
  })

  (0 until LoadPipelineWidth).map(i => {
    loadWbSel(i) := RegNext(loadWbSelGen(i))
    loadWbSelV(i) := RegNext(loadWbSelVGen(i), init = false.B)
    when(io.ldout(i).fire()){
      // Mark them as writebacked, so they will not be selected in the next cycle
      writebacked(loadWbSel(i)) := true.B
    }
  })

  // Stage 1
  // Use indexes generated in cycle 0 to read data
  // writeback data to cdb
  (0 until LoadPipelineWidth).map(i => {
    // data select
    dataModule.io.wb.raddr(i) := loadWbSelGen(i)
    val rdata = dataModule.io.wb.rdata(i).data
    val seluop = uop(loadWbSel(i))
    val func = seluop.ctrl.fuOpType
    val raddr = dataModule.io.wb.rdata(i).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
    val rdataPartialLoad = rdataHelper(seluop, rdataSel)

    // writeback missed int/fp load
    //
    // Int load writeback will finish (if not blocked) in one cycle
    io.ldout(i).bits.uop := seluop
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.debug.isMMIO := debug_mmio(loadWbSel(i))
    io.ldout(i).bits.debug.isPerfCnt := false.B
    io.ldout(i).bits.debug.paddr := debug_paddr(loadWbSel(i))
    io.ldout(i).bits.debug.vaddr := vaddrModule.io.rdata(i+1)
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelV(i)

    when(io.ldout(i).fire()) {
      XSInfo("int load miss write to cdb robidx %d lqidx %d pc 0x%x mmio %x\n",
        io.ldout(i).bits.uop.robIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        debug_mmio(loadWbSel(i))
      )
    }

  })

  /**
    * Load commits
    *
    * When a load is committed, mark it as !allocated and move deqPtrExt forward.
    */
  (0 until CommitWidth).map(i => {
    when(commitCount > i.U){
      allocated((deqPtrExt+i.U).value) := false.B
      XSError(!allocated((deqPtrExt+i.U).value), s"why commit invalid entry $i?\n")
    }
  })

  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }

  def getOldest[T <: XSBundleWithMicroOp](valid: Seq[Bool], bits: Seq[T]): (Seq[Bool], Seq[T]) = {
    assert(valid.length == bits.length)
    assert(isPow2(valid.length))
    if (valid.length == 1) {
      (valid, bits)
    } else if (valid.length == 2) {
      val res = Seq.fill(2)(Wire(ValidIO(chiselTypeOf(bits(0)))))
      for (i <- res.indices) {
        res(i).valid := valid(i)
        res(i).bits := bits(i)
      }
      val oldest = Mux(valid(0) && valid(1), Mux(isAfter(bits(0).uop.robIdx, bits(1).uop.robIdx), res(1), res(0)), Mux(valid(0) && !valid(1), res(0), res(1)))
      (Seq(oldest.valid), Seq(oldest.bits))
    } else {
      val left = getOldest(valid.take(valid.length / 2), bits.take(valid.length / 2))
      val right = getOldest(valid.takeRight(valid.length / 2), bits.takeRight(valid.length / 2))
      getOldest(left._1 ++ right._1, left._2 ++ right._2)
    }
  }

  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).robIdx, uop(j).robIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }
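
  // Worked example for getFirstOne (illustrative): 8 entries, deqPtr = 2,
  // so startMask = b00000011.
  //   mask = b10000101 (entries 0, 2, 7 set)
  //   highBits = mask & ~startMask = b10000100 -> PriorityEncoder -> entry 2
  // If no candidate sits at or above deqPtr, highBits is all-zero and the
  // encoder falls back to mask itself, wrapping the search (entry 0 here);
  // the selected entry is therefore always the one closest to the queue head.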

  /**
    * Store-Load Memory violation detection
    *
    * When a store writes back, it searches LoadQueue for younger load instructions
    * with the same load physical address. They loaded wrong data and need re-execution.
    *
    * Cycle 0: Store Writeback
    *   Generate match vector for store address with rangeMask(stPtr, enqPtr).
    *   Besides, load instructions in LoadUnit_S1 and S2 are also checked.
    * Cycle 1: Redirect Generation
    *   There are three possible types of violations, up to 6 possible redirect requests.
    *   Choose the oldest load (part 1). (4 + 2) -> (1 + 2)
    * Cycle 2: Redirect Fire
    *   Choose the oldest load (part 2). (3 -> 1)
    *   Prepare redirect request according to the detected violation.
    *   Fire redirect request (if valid)
    */

  // stage 0:        lq l1 wb     l1 wb lq
  //                 |  |  |      |  |  |  (paddr match)
  // stage 1:        lq l1 wb     l1 wb lq
  //                 |  |  |      |  |  |
  //                 |  |------------|  |
  //                 |        |         |
  // stage 2:        lq      l1wb       lq
  //                 |        |         |
  //                 --------------------
  //                          |
  //                      rollback req
  io.load_s1 := DontCare
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
    // entries between the store's lqIdx and enqPtr hold loads younger than this store
    val stToEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)

    // check if a load already in lq needs to be rolled back
    dataModule.io.violation(i).paddr := io.storeIn(i).bits.paddr
    dataModule.io.violation(i).mask := io.storeIn(i).bits.mask
    val addrMaskMatch = RegNext(dataModule.io.violation(i).violationMask)
    val entryNeedCheck = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      allocated(j) && stToEnqPtrMask(j) && (datavalid(j) || miss(j))
    })))
    val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
      addrMaskMatch(j) && entryNeedCheck(j)
    }))
    val lqViolation = lqViolationVec.asUInt().orR() && RegNext(!io.storeIn(i).bits.miss)
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when l/s writeback to rob together, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
        isAfter(io.loadIn(j).bits.uop.robIdx, io.storeIn(i).bits.uop.robIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR() && RegNext(io.storeIn(i).valid && !io.storeIn(i).bits.miss)
    val wbViolationUop = getOldest(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits))))._2(0).uop
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for a load in l1
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.load_s1(j).valid && // L1 valid
        isAfter(io.load_s1(j).uop.robIdx, io.storeIn(i).bits.uop.robIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR() && RegNext(io.storeIn(i).valid && !io.storeIn(i).bits.miss)
    val load_s1 = Wire(Vec(LoadPipelineWidth, new XSBundleWithMicroOp))
    (0 until LoadPipelineWidth).foreach(i => load_s1(i).uop := io.load_s1(i).uop)
    val l1ViolationUop = getOldest(l1ViolationVec, RegNext(load_s1))._2(0).uop
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x robidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.robIdx.asUInt, l1ViolationUop.robIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x robidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.robIdx.asUInt, lqViolationUop.robIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x robidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.robIdx.asUInt, wbViolationUop.robIdx.asUInt
    )

    ((lqViolation, lqViolationUop), (wbViolation, wbViolationUop), (l1Violation, l1ViolationUop))
  }

  def rollbackSel(a: Valid[MicroOpRbExt], b: Valid[MicroOpRbExt]): ValidIO[MicroOpRbExt] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.uop.robIdx, b.bits.uop.robIdx), b, a), // a,b both valid, sel oldest
        a // sel a
      ),
      b // sel b
    )
  }

  val lastCycleRedirect = RegNext(io.brqRedirect)
  val lastlastCycleRedirect = RegNext(lastCycleRedirect)

  // S2: select rollback (part1) and generate rollback request
  // rollback check
  // Wb/L1 rollback seq check is done in s2
  val rollbackWb = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  val rollbackL1 = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  val rollbackL1Wb = Wire(Vec(StorePipelineWidth*2, Valid(new MicroOpRbExt)))
  // Lq rollback seq check is done in s3 (next stage), as getting rollbackLq MicroOp is slow
  val rollbackLq = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  // store ftq index for store set update
  val stFtqIdxS2 = Wire(Vec(StorePipelineWidth, new FtqPtr))
  val stFtqOffsetS2 = Wire(Vec(StorePipelineWidth, UInt(log2Up(PredictWidth).W)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollbackLq(i).valid := detectedRollback._1._1 && RegNext(io.storeIn(i).valid)
    rollbackLq(i).bits.uop := detectedRollback._1._2
    rollbackLq(i).bits.flag := i.U
    rollbackWb(i).valid := detectedRollback._2._1 && RegNext(io.storeIn(i).valid)
    rollbackWb(i).bits.uop := detectedRollback._2._2
    rollbackWb(i).bits.flag := i.U
    rollbackL1(i).valid := detectedRollback._3._1 && RegNext(io.storeIn(i).valid)
    rollbackL1(i).bits.uop := detectedRollback._3._2
    rollbackL1(i).bits.flag := i.U
    rollbackL1Wb(2*i) := rollbackL1(i)
    rollbackL1Wb(2*i+1) := rollbackWb(i)
    stFtqIdxS2(i) := RegNext(io.storeIn(i).bits.uop.cf.ftqPtr)
    stFtqOffsetS2(i) := RegNext(io.storeIn(i).bits.uop.cf.ftqOffset)
  }

  val rollbackL1WbSelected = ParallelOperation(rollbackL1Wb, rollbackSel)
  val rollbackL1WbVReg = RegNext(rollbackL1WbSelected.valid)
  val rollbackL1WbReg = RegEnable(rollbackL1WbSelected.bits, rollbackL1WbSelected.valid)
  val rollbackLqVReg = rollbackLq.map(x => RegNext(x.valid))
  val rollbackLqReg = rollbackLq.map(x => RegEnable(x.bits, x.valid))

  // S3: select rollback (part2), generate rollback request, then fire rollback request
  // Note that we use robIdx - 1.U to flush the load instruction itself.
  // Thus, here if last cycle's robIdx equals this cycle's robIdx, it still triggers the redirect.
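
  // Worked example for the oldest-first selection (illustrative): if rollback
  // candidates a (robIdx 15) and b (robIdx 12) are both valid,
  // isAfter(a.robIdx, b.robIdx) holds and rollbackSel/getOldest return b, the
  // older load. Redirecting to the oldest violating load guarantees that every
  // younger violator is flushed by the same redirect.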

  val rollbackValidVec = rollbackL1WbVReg +: rollbackLqVReg
  val rollbackUopExtVec = rollbackL1WbReg +: rollbackLqReg

  // select uop in parallel
  val mask = getAfterMask(rollbackValidVec, rollbackUopExtVec.map(i => i.uop))
  val lqs = getOldest(rollbackLqVReg, rollbackLqReg)
  val rollbackUopExt = getOldest(lqs._1 :+ rollbackL1WbVReg, lqs._2 :+ rollbackL1WbReg)._2(0)
  val stFtqIdxS3 = RegNext(stFtqIdxS2)
  val stFtqOffsetS3 = RegNext(stFtqOffsetS2)
  val rollbackUop = rollbackUopExt.uop
  val rollbackStFtqIdx = stFtqIdxS3(rollbackUopExt.flag)
  val rollbackStFtqOffset = stFtqOffsetS3(rollbackUopExt.flag)

  // check if rollback request is still valid in parallel
  val rollbackValidVecChecked = Wire(Vec(LoadPipelineWidth + 1, Bool()))
  for(((v, uop), idx) <- rollbackValidVec.zip(rollbackUopExtVec.map(i => i.uop)).zipWithIndex) {
    rollbackValidVecChecked(idx) := v &&
      (!lastCycleRedirect.valid || isBefore(uop.robIdx, lastCycleRedirect.bits.robIdx)) &&
      (!lastlastCycleRedirect.valid || isBefore(uop.robIdx, lastlastCycleRedirect.bits.robIdx))
  }

  io.rollback.bits.robIdx := rollbackUop.robIdx
  io.rollback.bits.ftqIdx := rollbackUop.cf.ftqPtr
  io.rollback.bits.stFtqIdx := rollbackStFtqIdx
  io.rollback.bits.ftqOffset := rollbackUop.cf.ftqOffset
  io.rollback.bits.stFtqOffset := rollbackStFtqOffset
  io.rollback.bits.level := RedirectLevel.flush
  io.rollback.bits.interrupt := DontCare
  io.rollback.bits.cfiUpdate := DontCare
  io.rollback.bits.cfiUpdate.target := rollbackUop.cf.pc
  io.rollback.bits.debug_runahead_checkpoint_id := rollbackUop.debugInfo.runahead_checkpoint_id
  // io.rollback.bits.pc := DontCare

  io.rollback.valid := rollbackValidVecChecked.asUInt.orR

  when(io.rollback.valid) {
    // XSDebug("Mem rollback: pc %x robidx %d\n", io.rollback.bits.cfi, io.rollback.bits.robIdx.asUInt)
  }

  /**
    * Load-Load Memory violation detection
    *
    * When a load arrives at load_s1, it searches LoadQueue for younger load instructions
    * with the same load physical address. If a younger load has been released (or observed),
    * the younger load needs to be re-executed.
    *
    * For now, if re-exec is found to be needed in load_s1, we mark the older load as replayInst,
    * and the two loads will be replayed if the older load becomes the head of rob.
    *
    * When dcache releases a line, mark all writebacked entries in load queue with
    * the same line paddr as released.
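    *
    * Example (illustrative): loads A (older) and B (younger) to the same
    * address. B executes first and reads the cacheline; the line is then
    * released (e.g. another hart gained ownership and wrote it); A executes
    * later and may read the new value. B having observed older data than A
    * would break same-address load-load ordering, so B must be replayed.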
    */

  // Load-Load Memory violation query
  val deqRightMask = UIntToMask.rightmask(deqPtr, LoadQueueSize)
  (0 until LoadPipelineWidth).map(i => {
    dataModule.io.release_violation(i).paddr := io.loadViolationQuery(i).req.bits.paddr
    io.loadViolationQuery(i).req.ready := true.B
    io.loadViolationQuery(i).resp.valid := RegNext(io.loadViolationQuery(i).req.fire())
    // Generate real violation mask
    // Note that we use UIntToMask.rightmask here
    val startIndex = io.loadViolationQuery(i).req.bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.loadViolationQuery(i).req.bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val ldToEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
    val ldld_violation_mask_gen_1 = WireInit(VecInit((0 until LoadQueueSize).map(j => {
      ldToEnqPtrMask(j) && // the load is younger than current load
      allocated(j) && // entry is valid
      released(j) && // cacheline is released
      (datavalid(j) || miss(j)) // paddr is valid
    })))
    val ldld_violation_mask_gen_2 = WireInit(VecInit((0 until LoadQueueSize).map(j => {
      dataModule.io.release_violation(i).match_mask(j) // addr match
      // addr match result is slow to generate, we RegNext() it
    })))
    val ldld_violation_mask = RegNext(ldld_violation_mask_gen_1).asUInt & RegNext(ldld_violation_mask_gen_2).asUInt
    dontTouch(ldld_violation_mask)
    ldld_violation_mask.suggestName("ldldViolationMask_" + i)
    io.loadViolationQuery(i).resp.bits.have_violation := ldld_violation_mask.orR
  })

  // "released" flag update
  //
  // When io.release.valid (release1cycle.valid), it uses the last ld-ld paddr cam port to
  // update the release flag in 1 cycle

  when(release1cycle.valid){
    // Take over ld-ld paddr cam port
    dataModule.io.release_violation.takeRight(1)(0).paddr := release1cycle.bits.paddr
    io.loadViolationQuery.takeRight(1)(0).req.ready := false.B
  }

  when(release2cycle.valid){
    // If a load comes in that cycle, we cannot judge whether it has a ld-ld violation
    // We replay that load inst from RS
    io.loadViolationQuery.map(i => i.req.ready :=
      // use lsu side release2cycle_dup_lsu paddr for better timing
      !(i.req.bits.paddr(PAddrBits-1, DCacheLineOffset) === release2cycle_dup_lsu.bits.paddr(PAddrBits-1, DCacheLineOffset))
    )
    // io.loadViolationQuery.map(i => i.req.ready := false.B) // For better timing
  }

  (0 until LoadQueueSize).map(i => {
    when(RegNext(dataModule.io.release_violation.takeRight(1)(0).match_mask(i) &&
      allocated(i) &&
      datavalid(i) &&
      release1cycle.valid
    )){
      // Note: if a load has missed in dcache and is waiting for refill in load queue,
      // its released flag still needs to be set as true if addr matches.
      released(i) := true.B
    }
  })
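
  // Note on the address comparisons in this section (illustrative): paddr is
  // compared at cacheline granularity. Assuming 64-byte lines
  // (DCacheLineOffset = 6), paddr(PAddrBits-1, 6) is the line address, so two
  // accesses match iff they touch the same line, regardless of byte offset.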

  /**
    * Memory mapped IO / other uncached operations
    *
    * States:
    * (1) writeback from load units: mark as pending
    * (2) when they reach ROB's head, they can be sent to uncache channel
    * (3) response from uncache channel: mark as datavalid
    * (4) writeback to ROB (and other units): mark as writebacked
    * (5) ROB commits the instruction: same as normal instructions
    */
  // (2) when they reach ROB's head, they can be sent to uncache channel
  val lqTailMmioPending = WireInit(pending(deqPtr))
  val lqTailAllocated = WireInit(allocated(deqPtr))
  val s_idle :: s_req :: s_resp :: s_wait :: Nil = Enum(4)
  val uncacheState = RegInit(s_idle)
  switch(uncacheState) {
    is(s_idle) {
      when(RegNext(io.rob.pendingld && lqTailMmioPending && lqTailAllocated)) {
        uncacheState := s_req
      }
    }
    is(s_req) {
      when(io.uncache.req.fire()) {
        uncacheState := s_resp
      }
    }
    is(s_resp) {
      when(io.uncache.resp.fire()) {
        uncacheState := s_wait
      }
    }
    is(s_wait) {
      when(RegNext(io.rob.commit)) {
        uncacheState := s_idle // ready for next mmio
      }
    }
  }
  io.uncache.req.valid := uncacheState === s_req

  dataModule.io.uncache.raddr := deqPtrExtNext.value

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.uncache.rdata.paddr
  io.uncache.req.bits.data := dataModule.io.uncache.rdata.data
  io.uncache.req.bits.mask := dataModule.io.uncache.rdata.mask

  io.uncache.req.bits.id := DontCare
  io.uncache.req.bits.instrtype := DontCare

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B

    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  // (3) response from uncache channel: mark as datavalid
  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B

    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }

  // Read vaddr for mem exception
  // no inst will be committed 1 cycle before tval update
  vaddrModule.io.raddr(0) := (deqPtrExt + commitCount).value
  io.exceptionAddr.vaddr := vaddrModule.io.rdata(0)

  // Read vaddr for debug
  (0 until LoadPipelineWidth).map(i => {
    vaddrModule.io.raddr(i+1) := loadWbSel(i)
  })

  (0 until LoadPipelineWidth).map(i => {
    vaddrTriggerResultModule.io.raddr(i) := loadWbSelGen(i)
    io.trigger(i).lqLoadAddrTriggerHitVec := Mux(
      loadWbSelV(i),
      vaddrTriggerResultModule.io.rdata(i),
      VecInit(Seq.fill(3)(false.B))
    )
  })

  // misprediction recovery / exception redirect
  // invalidate lq term using robIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).robIdx.needFlush(io.brqRedirect) && allocated(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }
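
  // Illustrative pointer recovery for the update below: if a redirect
  // squashed 3 allocated entries last cycle (lastCycleCancelCount = 3) and 1
  // in-flight enqueue was cancelled (lastEnqCancel = 1), every enqPtrExt copy
  // steps back by 4; otherwise enqPtrExt advances by the number of accepted
  // enqueue requests.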

  /**
    * update pointers
    */
  val lastEnqCancel = PopCount(RegNext(VecInit(canEnqueue.zip(enqCancel).map(x => x._1 && x._2))))
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept, PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect.valid) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - (lastCycleCancelCount + lastEnqCancel)))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }

  deqPtrExtNext := deqPtrExt + commitCount
  deqPtrExt := deqPtrExtNext

  io.lqCancelCnt := RegNext(lastCycleCancelCount + lastEnqCancel)

  /**
    * misc
    */
  // perf counter
  QueuePerf(LoadQueueSize, validCount, !allowEnqueue)
  io.lqFull := !allowEnqueue
  XSPerfAccumulate("rollback", io.rollback.valid) // rollback redirect generated
  XSPerfAccumulate("mmioCycle", uncacheState =/= s_idle) // lq is busy dealing with uncache req
  XSPerfAccumulate("mmioCnt", io.uncache.req.fire())
  XSPerfAccumulate("refill", io.refill.valid)
  XSPerfAccumulate("writeback_success", PopCount(VecInit(io.ldout.map(i => i.fire()))))
  XSPerfAccumulate("writeback_blocked", PopCount(VecInit(io.ldout.map(i => i.valid && !i.ready))))
  XSPerfAccumulate("utilization_miss", PopCount((0 until LoadQueueSize).map(i => allocated(i) && miss(i))))

  val perfValidCount = RegNext(validCount)

  val perfEvents = Seq(
    ("rollback         ", io.rollback.valid),
    ("mmioCycle        ", uncacheState =/= s_idle),
    ("mmio_Cnt         ", io.uncache.req.fire()),
    ("refill           ", io.refill.valid),
    ("writeback_success", PopCount(VecInit(io.ldout.map(i => i.fire())))),
    ("writeback_blocked", PopCount(VecInit(io.ldout.map(i => i.valid && !i.ready)))),
    ("ltq_1_4_valid    ", (perfValidCount < (LoadQueueSize.U/4.U))),
    ("ltq_2_4_valid    ", (perfValidCount > (LoadQueueSize.U/4.U)) & (perfValidCount <= (LoadQueueSize.U/2.U))),
    ("ltq_3_4_valid    ", (perfValidCount > (LoadQueueSize.U/2.U)) & (perfValidCount <= (LoadQueueSize.U*3.U/4.U))),
    ("ltq_4_4_valid    ", (perfValidCount > (LoadQueueSize.U*3.U/4.U)))
  )
  generatePerfEvent()

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    XSDebug(i + " pc %x pa %x ", uop(i).cf.pc, debug_paddr(i))
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && miss(i), "m")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, "\n")
  }

}