/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/
package xiangshan.mem

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import xiangshan._
import xiangshan.backend.rob.{RobPtr, RobLsqIO}
import xiangshan.cache._
import xiangshan.backend.fu.fpu.FPU
import xiangshan.cache.mmu._
import xiangshan.frontend.FtqPtr
import xiangshan.ExceptionNO._
import xiangshan.cache.wpu.ReplayCarry
import xiangshan.mem.mdp._
import utils._
import utility._

object LoadReplayCauses {
  // These causes are prioritized: a lower encoding means a higher priority.
  // When a load replay happens, the load unit selects the highest-priority
  // cause from the replay cause vector.

  /*
   * Warning:
   * ************************************************************
   * * Don't change the priority. If the priority is changed,  *
   * * deadlock may occur. If you really need to change or     *
   * * add a priority, please ensure that no deadlock occurs.  *
   * ************************************************************
   *
   */
  // st-ld violation re-execute check
  val C_MA = 0
  // tlb miss check
  val C_TM = 1
  // store-to-load-forwarding check
  val C_FF = 2
  // dcache replay check
  val C_DR = 3
  // dcache miss check
  val C_DM = 4
  // wpu predict fail
  val C_WF = 5
  // dcache bank conflict check
  val C_BC = 6
  // RAR queue accept check
  val C_RAR = 7
  // RAW queue accept check
  val C_RAW = 8
  // st-ld violation
  val C_NK = 9
  // total causes
  val allCauses = 10
}

class AgeDetector(numEntries: Int, numEnq: Int, regOut: Boolean = true)(implicit p: Parameters) extends XSModule {
  val io = IO(new Bundle {
    // NOTE: deq and enq may come at the same cycle.
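    // Port summary (as used below):
    //   enq(k): one-hot mask of the entry written by enqueue port k this cycle
    //   deq   : mask of entries that dequeue or are flushed this cycle
    //   ready : mask of entries that are candidates for selection
    //   out   : one-hot mask of the oldest ready entry, derived from the
    //           registered age matrix when regOut is true, otherwise from
    //           its next-state value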
    val enq = Vec(numEnq, Input(UInt(numEntries.W)))
    val deq = Input(UInt(numEntries.W))
    val ready = Input(UInt(numEntries.W))
    val out = Output(UInt(numEntries.W))
  })

  // age(i)(j): entry i enters queue before entry j
  val age = Seq.fill(numEntries)(Seq.fill(numEntries)(RegInit(false.B)))
  val nextAge = Seq.fill(numEntries)(Seq.fill(numEntries)(Wire(Bool())))

  // to reduce reg usage, only use upper matrix
  def get_age(row: Int, col: Int): Bool = if (row <= col) age(row)(col) else !age(col)(row)
  def get_next_age(row: Int, col: Int): Bool = if (row <= col) nextAge(row)(col) else !nextAge(col)(row)
  def isFlushed(i: Int): Bool = io.deq(i)
  def isEnqueued(i: Int, numPorts: Int = -1): Bool = {
    val takePorts = if (numPorts == -1) io.enq.length else numPorts
    takePorts match {
      case 0 => false.B
      case 1 => io.enq.head(i) && !isFlushed(i)
      case n => VecInit(io.enq.take(n).map(_(i))).asUInt.orR && !isFlushed(i)
    }
  }

  for ((row, i) <- nextAge.zipWithIndex) {
    val thisValid = get_age(i, i) || isEnqueued(i)
    for ((elem, j) <- row.zipWithIndex) {
      when (isFlushed(i)) {
        // (1) when entry i is flushed or dequeues, set row(i) to false.B
        elem := false.B
      }.elsewhen (isFlushed(j)) {
        // (2) when entry j is flushed or dequeues, set column(j) to thisValid
        elem := thisValid
      }.elsewhen (isEnqueued(i)) {
        // (3) when entry i enqueues from port k,
        // (3.1) if entry j enqueues from a previous port, set to false
        // (3.2) otherwise, set to true if and only if entry j is invalid
        // overall: !jEnqFromPreviousPorts && !jIsValid
        val sel = io.enq.map(_(i))
        val result = (0 until numEnq).map(k => isEnqueued(j, k))
        // why ParallelMux: sel must be one-hot since enq is one-hot
        elem := !get_age(j, j) && !ParallelMux(sel, result)
      }.otherwise {
        // default: unchanged
        elem := get_age(i, j)
      }
      age(i)(j) := elem
    }
  }

  def getOldest(get: (Int, Int) => Bool): UInt = {
    VecInit((0 until numEntries).map(i => {
      io.ready(i) & VecInit((0 until numEntries).map(j => if (i != j) !io.ready(j) || get(i, j) else true.B)).asUInt.andR
    })).asUInt
  }
  val best = getOldest(get_age)
  val nextBest = getOldest(get_next_age)

  io.out := (if (regOut) best else nextBest)
}

object AgeDetector {
  def apply(numEntries: Int, enq: Vec[UInt], deq: UInt, ready: UInt)(implicit p: Parameters): Valid[UInt] = {
    val age = Module(new AgeDetector(numEntries, enq.length, regOut = true))
    age.io.enq := enq
    age.io.deq := deq
    age.io.ready := ready
    val out = Wire(Valid(UInt(deq.getWidth.W)))
    out.valid := age.io.out.orR
    out.bits := age.io.out
    out
  }
}

class LoadQueueReplay(implicit p: Parameters) extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasTlbConst
  with HasPerfEvents
{
  val io = IO(new Bundle() {
    // control
    val redirect = Flipped(ValidIO(new Redirect))

    // from load unit s3
    val enq = Vec(LoadPipelineWidth, Flipped(Decoupled(new LqWriteBundle)))

    // from sta s1
    val storeAddrIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))

    // from std s1
    val storeDataIn = Vec(StorePipelineWidth, Flipped(Valid(new ExuOutput)))

    // queue-based replay
    val replay = Vec(LoadPipelineWidth, Decoupled(new LsPipelineBundle))
    val refill = Flipped(ValidIO(new Refill))
    val tl_d_channel = Input(new DcacheToLduForwardIO)
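    // Note: tl_d_channel and l2_hint (below) are the DCache-side wake-up
    // sources for entries blocked on C_DM; refill is only observed for a
    // debug message in this module.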

    // from StoreQueue
    val stAddrReadySqPtr = Input(new SqPtr)
    val stAddrReadyVec = Input(Vec(StoreQueueSize, Bool()))
    val stDataReadySqPtr = Input(new SqPtr)
    val stDataReadyVec = Input(Vec(StoreQueueSize, Bool()))

    // misc status
    val sqEmpty = Input(Bool())
    val lqFull = Output(Bool())
    val ldWbPtr = Input(new LqPtr)
    val rarFull = Input(Bool())
    val rawFull = Input(Bool())
    val l2_hint = Input(Valid(new L2ToL1Hint()))
    val tlb_hint = Flipped(new TlbHintIO)
    val tlbReplayDelayCycleCtrl = Vec(4, Input(UInt(ReSelectLen.W)))

    val debugTopDown = new LoadQueueTopDownIO
  })

  println("LoadQueueReplay size: " + LoadQueueReplaySize)
  // LoadQueueReplay fields:
  // +-----------+---------+-------+-------+-------+
  // | Allocated | MicroOp | VAddr | Cause | Flags |
  // +-----------+---------+-------+-------+-------+
  // Allocated : entry has been allocated already
  // MicroOp   : inst's microOp
  // VAddr     : virtual address
  // Cause     : replay cause
  // Flags     : rar/raw queue allocate flags
  val allocated = RegInit(VecInit(List.fill(LoadQueueReplaySize)(false.B))) // control signals need an explicit initial value
  val scheduled = RegInit(VecInit(List.fill(LoadQueueReplaySize)(false.B)))
  val uop = Reg(Vec(LoadQueueReplaySize, new MicroOp))
  val vaddrModule = Module(new LqVAddrModule(
    gen = UInt(VAddrBits.W),
    numEntries = LoadQueueReplaySize,
    numRead = LoadPipelineWidth,
    numWrite = LoadPipelineWidth,
    numWBank = LoadQueueNWriteBanks,
    numWDelay = 2,
    numCamPort = 0))
  vaddrModule.io := DontCare
  val debug_vaddr = RegInit(VecInit(List.fill(LoadQueueReplaySize)(0.U(VAddrBits.W))))
  val cause = RegInit(VecInit(List.fill(LoadQueueReplaySize)(0.U(LoadReplayCauses.allCauses.W))))
  val blocking = RegInit(VecInit(List.fill(LoadQueueReplaySize)(false.B)))

  // freelist: stores the indices of free entries.
  // +---+---+--------------+-----+-----+
  // | 0 | 1 |    ......    | n-2 | n-1 |
  // +---+---+--------------+-----+-----+
  val freeList = Module(new FreeList(
    size = LoadQueueReplaySize,
    allocWidth = LoadPipelineWidth,
    freeWidth = 4,
    enablePreAlloc = true,
    moduleName = "LoadQueueReplay freelist"
  ))
  freeList.io := DontCare

  /**
   * used for re-select control
   */
  val blockSqIdx = Reg(Vec(LoadQueueReplaySize, new SqPtr))
  // DCache miss block
  val missMSHRId = RegInit(VecInit(List.fill(LoadQueueReplaySize)(0.U((log2Up(cfg.nMissEntries + 1).W)))))
  val tlbHintId = RegInit(VecInit(List.fill(LoadQueueReplaySize)(0.U((log2Up(loadfiltersize + 1).W)))))
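  // Per-entry wake-up bookkeeping: blockSqIdx records the store (sqIdx) a
  // C_MA/C_FF entry waits on; missMSHRId and tlbHintId record which MSHR or
  // TLB hint a C_DM/C_TM entry waits on.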
  // Has this load already updated the dcache replacement policy?
  val replacementUpdated = RegInit(VecInit(List.fill(LoadQueueReplaySize)(false.B)))
  val missDbUpdated = RegInit(VecInit(List.fill(LoadQueueReplaySize)(false.B)))
  val trueCacheMissReplay = WireInit(VecInit(cause.map(_(LoadReplayCauses.C_DM))))
  val replayCarryReg = RegInit(VecInit(List.fill(LoadQueueReplaySize)(ReplayCarry(nWays, 0.U, false.B))))
  val dataInLastBeatReg = RegInit(VecInit(List.fill(LoadQueueReplaySize)(false.B)))

  /**
   * Enqueue
   */
  val canEnqueue = io.enq.map(_.valid)
  val cancelEnq = io.enq.map(enq => enq.bits.uop.robIdx.needFlush(io.redirect))
  val needReplay = io.enq.map(enq => enq.bits.rep_info.need_rep)
  val hasExceptions = io.enq.map(enq => ExceptionNO.selectByFu(enq.bits.uop.cf.exceptionVec, lduCfg).asUInt.orR && !enq.bits.tlbMiss)
  val loadReplay = io.enq.map(enq => enq.bits.isLoadReplay)
  val needEnqueue = VecInit((0 until LoadPipelineWidth).map(w => {
    canEnqueue(w) && !cancelEnq(w) && needReplay(w) && !hasExceptions(w)
  }))
  val canFreeVec = VecInit((0 until LoadPipelineWidth).map(w => {
    canEnqueue(w) && loadReplay(w) && (!needReplay(w) || hasExceptions(w))
  }))

  // select LoadPipelineWidth valid indices.
  val lqFull = freeList.io.empty
  val lqFreeNums = freeList.io.validCount

  // replay logic
  // release logic generation
  val storeAddrInSameCycleVec = Wire(Vec(LoadQueueReplaySize, Bool()))
  val storeDataInSameCycleVec = Wire(Vec(LoadQueueReplaySize, Bool()))
  val addrNotBlockVec = Wire(Vec(LoadQueueReplaySize, Bool()))
  val dataNotBlockVec = Wire(Vec(LoadQueueReplaySize, Bool()))
  val storeAddrValidVec = addrNotBlockVec.asUInt | storeAddrInSameCycleVec.asUInt
  val storeDataValidVec = dataNotBlockVec.asUInt | storeDataInSameCycleVec.asUInt

  // store data valid check
  val stAddrReadyVec = io.stAddrReadyVec
  val stDataReadyVec = io.stDataReadyVec

  for (i <- 0 until LoadQueueReplaySize) {
    // dequeue
    // FIXME: store*Ptr is not accurate
    dataNotBlockVec(i) := !isBefore(io.stDataReadySqPtr, blockSqIdx(i)) || stDataReadyVec(blockSqIdx(i).value) || io.sqEmpty // for better timing
    addrNotBlockVec(i) := !isBefore(io.stAddrReadySqPtr, blockSqIdx(i)) || stAddrReadyVec(blockSqIdx(i).value) || io.sqEmpty // for better timing

    // store address execute
    storeAddrInSameCycleVec(i) := VecInit((0 until StorePipelineWidth).map(w => {
      io.storeAddrIn(w).valid &&
      !io.storeAddrIn(w).bits.miss &&
      blockSqIdx(i) === io.storeAddrIn(w).bits.uop.sqIdx
    })).asUInt.orR // for better timing

    // store data execute
    storeDataInSameCycleVec(i) := VecInit((0 until StorePipelineWidth).map(w => {
      io.storeDataIn(w).valid &&
      blockSqIdx(i) === io.storeDataIn(w).bits.uop.sqIdx
    })).asUInt.orR // for better timing
  }

  // store addr issue check
  val stAddrDeqVec = Wire(Vec(LoadQueueReplaySize, Bool()))
  (0 until LoadQueueReplaySize).map(i => {
    stAddrDeqVec(i) := allocated(i) && storeAddrValidVec(i)
  })

  // store data issue check
  val stDataDeqVec = Wire(Vec(LoadQueueReplaySize, Bool()))
  (0 until LoadQueueReplaySize).map(i => {
    stDataDeqVec(i) := allocated(i) && storeDataValidVec(i)
  })

  // update blocking condition
  (0 until LoadQueueReplaySize).map(i => {
    // case C_MA
    when (cause(i)(LoadReplayCauses.C_MA)) {
      blocking(i) := Mux(stAddrDeqVec(i), false.B, blocking(i))
    }
    // case C_TM
    when (cause(i)(LoadReplayCauses.C_TM)) {
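      // unblock when the TLB hint response carries this entry's saved hint id,
      // or when it asks all waiting loads to replay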
      blocking(i) := Mux(io.tlb_hint.resp.valid &&
                         (io.tlb_hint.resp.bits.replay_all ||
                          io.tlb_hint.resp.bits.id === tlbHintId(i)), false.B, blocking(i))
    }
    // case C_FF
    when (cause(i)(LoadReplayCauses.C_FF)) {
      blocking(i) := Mux(stDataDeqVec(i), false.B, blocking(i))
    }
    // case C_DM
    when (cause(i)(LoadReplayCauses.C_DM)) {
      blocking(i) := Mux(io.tl_d_channel.valid && io.tl_d_channel.mshrid === missMSHRId(i), false.B, blocking(i))
    }
    // case C_RAR
    when (cause(i)(LoadReplayCauses.C_RAR)) {
      blocking(i) := Mux((!io.rarFull || !isAfter(uop(i).lqIdx, io.ldWbPtr)), false.B, blocking(i))
    }
    // case C_RAW
    when (cause(i)(LoadReplayCauses.C_RAW)) {
      blocking(i) := Mux((!io.rawFull || !isAfter(uop(i).sqIdx, io.stAddrReadySqPtr)), false.B, blocking(i))
    }
  })

  // Replay is split into 3 stages
  require((LoadQueueReplaySize % LoadPipelineWidth) == 0)
  def getRemBits(input: UInt)(rem: Int): UInt = {
    VecInit((0 until LoadQueueReplaySize / LoadPipelineWidth).map(i => { input(LoadPipelineWidth * i + rem) })).asUInt
  }

  def getRemSeq(input: Seq[Seq[Bool]])(rem: Int) = {
    (0 until LoadQueueReplaySize / LoadPipelineWidth).map(i => { input(LoadPipelineWidth * i + rem) })
  }

  // stage 0/1: select LoadPipelineWidth entries and read their vaddr
  val s0_oldestSel = Wire(Vec(LoadPipelineWidth, Valid(UInt(LoadQueueReplaySize.W))))
  val s1_can_go = Wire(Vec(LoadPipelineWidth, Bool()))
  val s1_oldestSel = Wire(Vec(LoadPipelineWidth, Valid(UInt(log2Up(LoadQueueReplaySize + 1).W))))
  val s2_can_go = Wire(Vec(LoadPipelineWidth, Bool()))
  val s2_oldestSel = Wire(Vec(LoadPipelineWidth, Valid(UInt(log2Up(LoadQueueReplaySize + 1).W))))

  // generate mask
  val needCancel = Wire(Vec(LoadQueueReplaySize, Bool()))
  // generate enq mask
  val enqIndexOH = Wire(Vec(LoadPipelineWidth, UInt(LoadQueueReplaySize.W)))
  val s0_loadEnqFireMask = io.enq.map(x => x.fire && !x.bits.isLoadReplay).zip(enqIndexOH).map(x => Mux(x._1, x._2, 0.U))
  val s0_remLoadEnqFireVec = s0_loadEnqFireMask.map(x => VecInit((0 until LoadPipelineWidth).map(rem => getRemBits(x)(rem))))
  val s0_remEnqSelVec = Seq.tabulate(LoadPipelineWidth)(w => VecInit(s0_remLoadEnqFireVec.map(x => x(w))))

  // generate free mask
  val s0_loadFreeSelMask = RegNext(needCancel.asUInt)
  val s0_remFreeSelVec = VecInit(Seq.tabulate(LoadPipelineWidth)(rem => getRemBits(s0_loadFreeSelMask)(rem)))

  // An L2 hint wakes up a cache-missed load: L2 will send GrantData in the
  // next 2/3 cycles, so wake the missed load early and send it to the load
  // pipeline, where it will hit the data in the D channel or in the MSHR in
  // load s1.
  val s0_loadHintWakeMask = VecInit((0 until LoadQueueReplaySize).map(i => {
    allocated(i) && !scheduled(i) && cause(i)(LoadReplayCauses.C_DM) && blocking(i) && missMSHRId(i) === io.l2_hint.bits.sourceId && io.l2_hint.valid
  })).asUInt
  // L2 sends 2 beats of data over 2 cycles: if the data needed by this load
  // is in the first beat, select it this cycle, otherwise next cycle.
  val s0_loadHintSelMask = s0_loadHintWakeMask & VecInit(dataInLastBeatReg.map(!_)).asUInt
  val s0_remLoadHintSelMask = VecInit((0 until LoadPipelineWidth).map(rem => getRemBits(s0_loadHintSelMask)(rem)))
  val s0_remHintSelValidVec = VecInit((0 until LoadPipelineWidth).map(rem => ParallelORR(s0_remLoadHintSelMask(rem))))
  val s0_hintSelValid = ParallelORR(s0_loadHintSelMask)

  // wake up cache-missed loads
  (0 until LoadQueueReplaySize).foreach(i => {
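    // clearing blocking here takes effect next cycle; same-cycle selection of
    // a hinted entry is handled by s0_loadHintSelMask above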
    when (s0_loadHintWakeMask(i)) {
      blocking(i) := false.B
    }
  })

  // generate replay mask
  // replay select priority is given as follows:
  // 1. hint wake-up load
  // 2. higher priority load
  // 3. lower priority load
  val s0_loadHigherPriorityReplaySelMask = VecInit((0 until LoadQueueReplaySize).map(i => {
    val hasHigherPriority = cause(i)(LoadReplayCauses.C_DM) || cause(i)(LoadReplayCauses.C_FF)
    allocated(i) && !scheduled(i) && !blocking(i) && hasHigherPriority
  })).asUInt // use UInt instead of Vec to reduce verilog lines
  val s0_remLoadHigherPriorityReplaySelMask = VecInit((0 until LoadPipelineWidth).map(rem => getRemBits(s0_loadHigherPriorityReplaySelMask)(rem)))
  val s0_loadLowerPriorityReplaySelMask = VecInit((0 until LoadQueueReplaySize).map(i => {
    val hasLowerPriority = !cause(i)(LoadReplayCauses.C_DM) && !cause(i)(LoadReplayCauses.C_FF)
    allocated(i) && !scheduled(i) && !blocking(i) && hasLowerPriority
  })).asUInt // use UInt instead of Vec to reduce verilog lines
  val s0_remLoadLowerPriorityReplaySelMask = VecInit((0 until LoadPipelineWidth).map(rem => getRemBits(s0_loadLowerPriorityReplaySelMask)(rem)))
  val s0_loadNormalReplaySelMask = s0_loadLowerPriorityReplaySelMask | s0_loadHigherPriorityReplaySelMask | s0_loadHintSelMask
  val s0_remNormalReplaySelVec = VecInit((0 until LoadPipelineWidth).map(rem => s0_remLoadLowerPriorityReplaySelMask(rem) | s0_remLoadHigherPriorityReplaySelMask(rem) | s0_remLoadHintSelMask(rem)))
  val s0_remPriorityReplaySelVec = VecInit((0 until LoadPipelineWidth).map(rem => {
    Mux(s0_remHintSelValidVec(rem), s0_remLoadHintSelMask(rem),
      Mux(ParallelORR(s0_remLoadHigherPriorityReplaySelMask(rem)), s0_remLoadHigherPriorityReplaySelMask(rem), s0_remLoadLowerPriorityReplaySelMask(rem)))
  }))
  /******************************************************************************************************
   * WARNING: OldestSelectStride must be less than or equal to the number of load pipeline stages.
   ******************************************************************************************************
   */
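  // Program-order boost: candidate entries whose lqIdx falls within
  // [ldWbPtr, ldWbPtr + OldestSelectStride) are preferred over the
  // age-matrix pick, so loads nearest the LQ head cannot be starved.
  // Example (stride 4): with ldWbPtr = 10, entries with lqIdx 10..13 match
  // oldestPtrExt, and an exact ldWbPtr match outranks the +1..+3 matches.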
  val OldestSelectStride = 4
  val oldestPtrExt = (0 until OldestSelectStride).map(i => io.ldWbPtr + i.U)
  val s0_oldestMatchMaskVec = (0 until LoadQueueReplaySize).map(i => (0 until OldestSelectStride).map(j => s0_loadNormalReplaySelMask(i) && uop(i).lqIdx === oldestPtrExt(j)))
  val s0_remOldestMatchMaskVec = (0 until LoadPipelineWidth).map(rem => getRemSeq(s0_oldestMatchMaskVec.map(_.take(1)))(rem))
  val s0_remOlderMatchMaskVec = (0 until LoadPipelineWidth).map(rem => getRemSeq(s0_oldestMatchMaskVec.map(_.drop(1)))(rem))
  val s0_remOldestSelVec = VecInit(Seq.tabulate(LoadPipelineWidth)(rem => {
    VecInit((0 until LoadQueueReplaySize / LoadPipelineWidth).map(i => {
      Mux(ParallelORR(s0_remOldestMatchMaskVec(rem).map(_(0))), s0_remOldestMatchMaskVec(rem)(i)(0), s0_remOlderMatchMaskVec(rem)(i).reduce(_|_))
    })).asUInt
  }))
  val s0_remOldestHintSelVec = s0_remOldestSelVec.zip(s0_remLoadHintSelMask).map {
    case (oldestVec, hintVec) => oldestVec & hintVec
  }

  // select oldest logic
  s0_oldestSel := VecInit((0 until LoadPipelineWidth).map(rport => {
    // select the earliest-enqueued instruction
    val ageOldest = AgeDetector(LoadQueueReplaySize / LoadPipelineWidth, s0_remEnqSelVec(rport), s0_remFreeSelVec(rport), s0_remPriorityReplaySelVec(rport))
    assert(!(ageOldest.valid && PopCount(ageOldest.bits) > 1.U), "oldest index must be one-hot!")
    val ageOldestValid = ageOldest.valid
    val ageOldestIndexOH = ageOldest.bits

    // select the program-order oldest
    val l2HintFirst = io.l2_hint.valid && ParallelORR(s0_remOldestHintSelVec(rport))
    val issOldestValid = l2HintFirst || ParallelORR(s0_remOldestSelVec(rport))
    val issOldestIndexOH = Mux(l2HintFirst, PriorityEncoderOH(s0_remOldestHintSelVec(rport)), PriorityEncoderOH(s0_remOldestSelVec(rport)))

    val oldest = Wire(Valid(UInt()))
    val oldestSel = Mux(issOldestValid, issOldestIndexOH, ageOldestIndexOH)
    val oldestBitsVec = Wire(Vec(LoadQueueReplaySize, Bool()))

    require((LoadQueueReplaySize % LoadPipelineWidth) == 0)
    oldestBitsVec.foreach(e => e := false.B)
    for (i <- 0 until LoadQueueReplaySize / LoadPipelineWidth) {
      oldestBitsVec(i * LoadPipelineWidth + rport) := oldestSel(i)
    }

    oldest.valid := ageOldest.valid || issOldestValid
    oldest.bits := oldestBitsVec.asUInt
    oldest
  }))

  // stage 2: send replay request to load unit
  // replay cold-down window
  val ColdDownCycles = 16
  val coldCounter = RegInit(VecInit(List.fill(LoadPipelineWidth)(0.U(log2Up(ColdDownCycles).W))))
  val ColdDownThreshold = Wire(UInt(log2Up(ColdDownCycles).W))
  ColdDownThreshold := Constantin.createRecord("ColdDownThreshold_" + p(XSCoreParamsKey).HartId.toString(), initValue = 12.U)
  assert(ColdDownCycles.U > ColdDownThreshold, "ColdDownCycles must be greater than ColdDownThreshold!")

  def replayCanFire(i: Int) = coldCounter(i) >= 0.U && coldCounter(i) < ColdDownThreshold
  def coldDownNow(i: Int) = coldCounter(i) >= ColdDownThreshold

  for (i <- 0 until LoadPipelineWidth) {
    val s0_can_go = s1_can_go(i) ||
                    uop(s1_oldestSel(i).bits).robIdx.needFlush(io.redirect) ||
                    uop(s1_oldestSel(i).bits).robIdx.needFlush(RegNext(io.redirect))
    val s0_oldestSelIndexOH = s0_oldestSel(i).bits // one-hot
    s1_oldestSel(i).valid := RegEnable(s0_oldestSel(i).valid, s0_can_go)
    s1_oldestSel(i).bits := RegEnable(OHToUInt(s0_oldestSel(i).bits), s0_can_go)

    for (j <- 0 until LoadQueueReplaySize) {
      when (s0_can_go && s0_oldestSel(i).valid && s0_oldestSelIndexOH(j)) {
        scheduled(j) := true.B
      }
    }
  }
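  // s1 -> s2 handshake: s1 holds the selected index while vaddrModule is
  // read; a selection is dropped when its uop is flushed by a redirect in
  // either stage (s1_cancel / s2_cancelReplay below).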
  val s2_cancelReplay = Wire(Vec(LoadPipelineWidth, Bool()))
  for (i <- 0 until LoadPipelineWidth) {
    val s1_cancel = uop(s1_oldestSel(i).bits).robIdx.needFlush(io.redirect) ||
                    uop(s1_oldestSel(i).bits).robIdx.needFlush(RegNext(io.redirect))
    val s1_oldestSelV = s1_oldestSel(i).valid && !s1_cancel
    s1_can_go(i) := replayCanFire(i) && (!s2_oldestSel(i).valid || io.replay(i).fire) || s2_cancelReplay(i)
    s2_oldestSel(i).valid := RegEnable(Mux(s1_can_go(i), s1_oldestSelV, false.B), (s1_can_go(i) || io.replay(i).fire))
    s2_oldestSel(i).bits := RegEnable(s1_oldestSel(i).bits, s1_can_go(i))

    vaddrModule.io.ren(i) := s1_oldestSel(i).valid && s1_can_go(i)
    vaddrModule.io.raddr(i) := s1_oldestSel(i).bits
  }

  for (i <- 0 until LoadPipelineWidth) {
    val s1_replayIdx = s1_oldestSel(i).bits
    val s2_replayUop = RegEnable(uop(s1_replayIdx), s1_can_go(i))
    val s2_replayMSHRId = RegEnable(missMSHRId(s1_replayIdx), s1_can_go(i))
    val s2_replacementUpdated = RegEnable(replacementUpdated(s1_replayIdx), s1_can_go(i))
    val s2_missDbUpdated = RegEnable(missDbUpdated(s1_replayIdx), s1_can_go(i))
    val s2_replayCauses = RegEnable(cause(s1_replayIdx), s1_can_go(i))
    val s2_replayCarry = RegEnable(replayCarryReg(s1_replayIdx), s1_can_go(i))
    val s2_replayCacheMissReplay = RegEnable(trueCacheMissReplay(s1_replayIdx), s1_can_go(i))
    s2_cancelReplay(i) := s2_replayUop.robIdx.needFlush(io.redirect)

    s2_can_go(i) := DontCare
    io.replay(i).valid := s2_oldestSel(i).valid
    io.replay(i).bits := DontCare
    io.replay(i).bits.uop := s2_replayUop
    io.replay(i).bits.vaddr := vaddrModule.io.rdata(i)
    io.replay(i).bits.isFirstIssue := false.B
    io.replay(i).bits.isLoadReplay := true.B
    io.replay(i).bits.replayCarry := s2_replayCarry
    io.replay(i).bits.mshrid := s2_replayMSHRId
    io.replay(i).bits.replacementUpdated := s2_replacementUpdated
    io.replay(i).bits.missDbUpdated := s2_missDbUpdated
    io.replay(i).bits.forward_tlDchannel := s2_replayCauses(LoadReplayCauses.C_DM)
    io.replay(i).bits.schedIndex := s2_oldestSel(i).bits

    when (io.replay(i).fire) {
      XSError(!allocated(s2_oldestSel(i).bits), p"LoadQueueReplay: why replay an invalid entry ${s2_oldestSel(i).bits} ?")
    }
  }

  // update cold counter
  val lastReplay = RegNext(VecInit(io.replay.map(_.fire)))
  for (i <- 0 until LoadPipelineWidth) {
    when (lastReplay(i) && io.replay(i).fire) {
      coldCounter(i) := coldCounter(i) + 1.U
    } .elsewhen (coldDownNow(i)) {
      coldCounter(i) := coldCounter(i) + 1.U
    } .otherwise {
      coldCounter(i) := 0.U
    }
  }

  when (io.refill.valid) {
    XSDebug("miss resp: paddr:0x%x data %x\n", io.refill.bits.addr, io.refill.bits.data)
  }

  // LoadQueueReplay deallocate
  val freeMaskVec = Wire(Vec(LoadQueueReplaySize, Bool()))

  // init
  freeMaskVec.map(e => e := false.B)

  // Allocate logic
  val newEnqueue = (0 until LoadPipelineWidth).map(i => {
    needEnqueue(i) && !io.enq(i).bits.isLoadReplay
  })

  for ((enq, w) <- io.enq.zipWithIndex) {
    vaddrModule.io.wen(w) := false.B
    freeList.io.doAllocate(w) := false.B

    freeList.io.allocateReq(w) := true.B
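    // index choice: a replayed load already owns an entry and re-uses its
    // schedIndex (so it is always ready); a first-time load takes a fresh
    // slot from the freelist, offset by earlier-port allocations this cycle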
    // Allocated ready
    val offset = PopCount(newEnqueue.take(w))
    val canAccept = freeList.io.canAllocate(offset)
    val enqIndex = Mux(enq.bits.isLoadReplay, enq.bits.schedIndex, freeList.io.allocateSlot(offset))
    enqIndexOH(w) := UIntToOH(enqIndex)
    enq.ready := Mux(enq.bits.isLoadReplay, true.B, canAccept)

    when (needEnqueue(w) && enq.ready) {
      val debug_robIdx = enq.bits.uop.robIdx.asUInt
      XSError(allocated(enqIndex) && !enq.bits.isLoadReplay, p"LoadQueueReplay: can not accept more loads, check: ldu $w, robIdx $debug_robIdx!")
      XSError(hasExceptions(w), p"LoadQueueReplay: the instruction has an exception and can not be replayed, check: ldu $w, robIdx $debug_robIdx!")

      freeList.io.doAllocate(w) := !enq.bits.isLoadReplay

      // Allocate new entry
      allocated(enqIndex) := true.B
      scheduled(enqIndex) := false.B
      uop(enqIndex) := enq.bits.uop

      vaddrModule.io.wen(w) := true.B
      vaddrModule.io.waddr(w) := enqIndex
      vaddrModule.io.wdata(w) := enq.bits.vaddr
      debug_vaddr(enqIndex) := enq.bits.vaddr

      /**
       * used for feedback and replay
       */
      // set flags
      val replayInfo = enq.bits.rep_info
      val dataInLastBeat = replayInfo.last_beat
      cause(enqIndex) := replayInfo.cause.asUInt

      // init
      blocking(enqIndex) := true.B

      // update blocking pointer
      when (replayInfo.cause(LoadReplayCauses.C_BC) ||
            replayInfo.cause(LoadReplayCauses.C_NK) ||
            replayInfo.cause(LoadReplayCauses.C_DR) ||
            replayInfo.cause(LoadReplayCauses.C_WF)) {
        // normal case: bank conflict, nuke, dcache replay or way-predict fail
        // can replay next cycle
        blocking(enqIndex) := false.B
      }

      // special case: tlb miss
      when (replayInfo.cause(LoadReplayCauses.C_TM)) {
        blocking(enqIndex) := !replayInfo.tlb_full &&
                              !(io.tlb_hint.resp.valid && (io.tlb_hint.resp.bits.id === replayInfo.tlb_id || io.tlb_hint.resp.bits.replay_all))
        tlbHintId(enqIndex) := replayInfo.tlb_id
      }

      // special case: dcache miss
      when (replayInfo.cause(LoadReplayCauses.C_DM) && enq.bits.handledByMSHR) {
        blocking(enqIndex) := !replayInfo.full_fwd && // dcache miss
                              !(io.tl_d_channel.valid && io.tl_d_channel.mshrid === replayInfo.mshr_id) // no refill in this cycle
      }

      // special case: st-ld violation
      when (replayInfo.cause(LoadReplayCauses.C_MA)) {
        blockSqIdx(enqIndex) := replayInfo.addr_inv_sq_idx
      }

      // special case: data forward fail
      when (replayInfo.cause(LoadReplayCauses.C_FF)) {
        blockSqIdx(enqIndex) := replayInfo.data_inv_sq_idx
      }
      // extra info
      replayCarryReg(enqIndex) := replayInfo.rep_carry
      replacementUpdated(enqIndex) := enq.bits.replacementUpdated
      missDbUpdated(enqIndex) := enq.bits.missDbUpdated
      // update mshr_id only when the load has already been handled by the MSHR
      when (enq.bits.handledByMSHR) {
        missMSHRId(enqIndex) := replayInfo.mshr_id
      }
      dataInLastBeatReg(enqIndex) := dataInLastBeat
    }

    // feedback for a replayed load: free its entry or reschedule it
    val schedIndex = enq.bits.schedIndex
    when (enq.valid && enq.bits.isLoadReplay) {
      when (!needReplay(w) || hasExceptions(w)) {
        allocated(schedIndex) := false.B
        freeMaskVec(schedIndex) := true.B
      } .otherwise {
        scheduled(schedIndex) := false.B
      }
    }
  }

  // misprediction recovery / exception redirect
  for (i <- 0 until LoadQueueReplaySize) {
    needCancel(i) := uop(i).robIdx.needFlush(io.redirect) && allocated(i)
    when (needCancel(i)) {
      allocated(i) := false.B
      freeMaskVec(i) := true.B
    }
  }

  freeList.io.free := freeMaskVec.asUInt
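  // an entry is freed either when its replay comes back needing no further
  // replay (or with an exception), or when the uop is flushed by a redirect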
  io.lqFull := lqFull

  // Topdown
  val robHeadVaddr = io.debugTopDown.robHeadVaddr

  val uop_wrapper = Wire(Vec(LoadQueueReplaySize, new XSBundleWithMicroOp))
  (uop_wrapper.zipWithIndex).foreach {
    case (u, i) => {
      u.uop := uop(i)
    }
  }
  val lq_match_vec = (debug_vaddr.zip(allocated)).map{case(va, alloc) => alloc && (va === robHeadVaddr.bits)}
  val rob_head_lq_match = ParallelOperation(lq_match_vec.zip(uop_wrapper), (a: Tuple2[Bool, XSBundleWithMicroOp], b: Tuple2[Bool, XSBundleWithMicroOp]) => {
    val (a_v, a_uop) = (a._1, a._2)
    val (b_v, b_uop) = (b._1, b._2)

    val res = Mux(a_v && b_v, Mux(isAfter(a_uop.uop.robIdx, b_uop.uop.robIdx), b_uop, a_uop),
              Mux(a_v, a_uop,
              Mux(b_v, b_uop,
                  a_uop)))
    (a_v || b_v, res)
  })

  val lq_match_bits = rob_head_lq_match._2.uop
  val lq_match = rob_head_lq_match._1 && robHeadVaddr.valid
  val lq_match_idx = lq_match_bits.lqIdx.value

  val rob_head_tlb_miss        = lq_match && cause(lq_match_idx)(LoadReplayCauses.C_TM)
  val rob_head_nuke            = lq_match && cause(lq_match_idx)(LoadReplayCauses.C_NK)
  val rob_head_mem_amb         = lq_match && cause(lq_match_idx)(LoadReplayCauses.C_MA)
  val rob_head_conflict_replay = lq_match && cause(lq_match_idx)(LoadReplayCauses.C_BC)
  val rob_head_forward_fail    = lq_match && cause(lq_match_idx)(LoadReplayCauses.C_FF)
  val rob_head_mshrfull_replay = lq_match && cause(lq_match_idx)(LoadReplayCauses.C_DR)
  val rob_head_dcache_miss     = lq_match && cause(lq_match_idx)(LoadReplayCauses.C_DM)
  val rob_head_rar_nack        = lq_match && cause(lq_match_idx)(LoadReplayCauses.C_RAR)
  val rob_head_raw_nack        = lq_match && cause(lq_match_idx)(LoadReplayCauses.C_RAW)
  val rob_head_other_replay    = lq_match && (rob_head_rar_nack || rob_head_raw_nack || rob_head_forward_fail)

  val rob_head_vio_replay = rob_head_nuke || rob_head_mem_amb

  val rob_head_miss_in_dtlb = io.debugTopDown.robHeadMissInDTlb
  io.debugTopDown.robHeadTlbReplay := rob_head_tlb_miss && !rob_head_miss_in_dtlb
  io.debugTopDown.robHeadTlbMiss := rob_head_tlb_miss && rob_head_miss_in_dtlb
  io.debugTopDown.robHeadLoadVio := rob_head_vio_replay
  io.debugTopDown.robHeadLoadMSHR := rob_head_mshrfull_replay
  io.debugTopDown.robHeadOtherReplay := rob_head_other_replay
  val perfValidCount = RegNext(PopCount(allocated))
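  // Note: the replay_* counters below are sampled at first-time enqueue only
  // (fire && !isLoadReplay), so re-enqueues of an already-allocated entry are
  // not counted again.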
  // perf cnt
  val enqNumber               = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay))
  val deqNumber               = PopCount(io.replay.map(_.fire))
  val deqBlockCount           = PopCount(io.replay.map(r => r.valid && !r.ready))
  val replayTlbMissCount      = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay && enq.bits.rep_info.cause(LoadReplayCauses.C_TM)))
  val replayMemAmbCount       = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay && enq.bits.rep_info.cause(LoadReplayCauses.C_MA)))
  val replayNukeCount         = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay && enq.bits.rep_info.cause(LoadReplayCauses.C_NK)))
  val replayRARRejectCount    = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay && enq.bits.rep_info.cause(LoadReplayCauses.C_RAR)))
  val replayRAWRejectCount    = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay && enq.bits.rep_info.cause(LoadReplayCauses.C_RAW)))
  val replayBankConflictCount = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay && enq.bits.rep_info.cause(LoadReplayCauses.C_BC)))
  val replayDCacheReplayCount = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay && enq.bits.rep_info.cause(LoadReplayCauses.C_DR)))
  val replayForwardFailCount  = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay && enq.bits.rep_info.cause(LoadReplayCauses.C_FF)))
  val replayDCacheMissCount   = PopCount(io.enq.map(enq => enq.fire && !enq.bits.isLoadReplay && enq.bits.rep_info.cause(LoadReplayCauses.C_DM)))
  XSPerfAccumulate("enq", enqNumber)
  XSPerfAccumulate("deq", deqNumber)
  XSPerfAccumulate("deq_block", deqBlockCount)
  XSPerfAccumulate("replay_full", io.lqFull)
  XSPerfAccumulate("replay_rar_nack", replayRARRejectCount)
  XSPerfAccumulate("replay_raw_nack", replayRAWRejectCount)
  XSPerfAccumulate("replay_nuke", replayNukeCount)
  XSPerfAccumulate("replay_mem_amb", replayMemAmbCount)
  XSPerfAccumulate("replay_tlb_miss", replayTlbMissCount)
  XSPerfAccumulate("replay_bank_conflict", replayBankConflictCount)
  XSPerfAccumulate("replay_dcache_replay", replayDCacheReplayCount)
  XSPerfAccumulate("replay_forward_fail", replayForwardFailCount)
  XSPerfAccumulate("replay_dcache_miss", replayDCacheMissCount)
  XSPerfAccumulate("replay_hint_wakeup", s0_hintSelValid)

  val perfEvents: Seq[(String, UInt)] = Seq(
    ("enq", enqNumber),
    ("deq", deqNumber),
    ("deq_block", deqBlockCount),
    ("replay_full", io.lqFull),
    ("replay_rar_nack", replayRARRejectCount),
    ("replay_raw_nack", replayRAWRejectCount),
    ("replay_nuke", replayNukeCount),
    ("replay_mem_amb", replayMemAmbCount),
    ("replay_tlb_miss", replayTlbMissCount),
    ("replay_bank_conflict", replayBankConflictCount),
    ("replay_dcache_replay", replayDCacheReplayCount),
    ("replay_forward_fail", replayForwardFailCount),
    ("replay_dcache_miss", replayDCacheMissCount),
  )
  generatePerfEvent()
  // end
}