/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/
package xiangshan.mem

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import xiangshan._
import xiangshan.backend.rob.{RobLsqIO, RobPtr}
import xiangshan.ExceptionNO._
import xiangshan.cache._
import utils._
import utility._
import xiangshan.backend.Bundles.DynInst
import xiangshan.backend.fu.FuConfig.LduCfg

class VirtualLoadQueue(implicit p: Parameters) extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasPerfEvents
{
  val io = IO(new Bundle() {
    // control
    val redirect = Flipped(Valid(new Redirect))
    // from dispatch
    val enq = new LqEnqIO
    // from ldu s3
    val ldin = Vec(LoadPipelineWidth, Flipped(DecoupledIO(new LqWriteBundle)))
    // to LoadQueueReplay and LoadQueueRAR
    val ldWbPtr = Output(new LqPtr)
    // global
    val lqFull = Output(Bool())
    val lqEmpty = Output(Bool())
    // to dispatch
    val lqDeq = Output(UInt(log2Up(CommitWidth + 1).W))
    val lqCancelCnt = Output(UInt(log2Up(VirtualLoadQueueSize + 1).W))
  })

  println("VirtualLoadQueue: size: " + VirtualLoadQueueSize)
  // VirtualLoadQueue fields
  // +-----------+---------+-------+
  // | Allocated | MicroOp | Flags |
  // +-----------+---------+-------+
  // Allocated : entry has already been allocated
  // MicroOp   : inst's microOp
  // Flags     : load flags
  val allocated = RegInit(VecInit(List.fill(VirtualLoadQueueSize)(false.B))) // control signals need an explicit initial value
  val uop = Reg(Vec(VirtualLoadQueueSize, new DynInst))
  val addrvalid = RegInit(VecInit(List.fill(VirtualLoadQueueSize)(false.B))) // non-mmio addr is valid
  val datavalid = RegInit(VecInit(List.fill(VirtualLoadQueueSize)(false.B))) // non-mmio data is valid

  /**
   * used for debug
   */
  val debug_mmio = Reg(Vec(VirtualLoadQueueSize, Bool())) // mmio: inst is an mmio inst
  val debug_paddr = Reg(Vec(VirtualLoadQueueSize, UInt(PAddrBits.W))) // mmio: inst's paddr

  // maintain pointers
  val enqPtrExt = RegInit(VecInit((0 until io.enq.req.length).map(_.U.asTypeOf(new LqPtr))))
  val enqPtr = enqPtrExt(0).value
  val deqPtr = Wire(new LqPtr)
  val deqPtrNext = Wire(new LqPtr)
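
  // Illustrative note on pointer arithmetic (the depth below is hypothetical,
  // not necessarily this config's value): LqPtr is a CircularQueuePtr, a
  // {flag, value} pair in which `value` indexes the queue storage and `flag`
  // toggles on each wrap-around. With a depth of 80, advancing
  // {flag = 0, value = 78} by 4 yields {flag = 1, value = 2}; isAfter and
  // distanceBetween consult the flag, so {1, 2} is correctly ordered after
  // {0, 78} at a distance of 4 despite 2 < 78.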

  /**
   * update pointer
   */
  val lastCycleRedirect = RegNext(io.redirect)
  val lastLastCycleRedirect = RegNext(lastCycleRedirect)

  val validCount = distanceBetween(enqPtrExt(0), deqPtr)
  val allowEnqueue = validCount <= (VirtualLoadQueueSize - LoadPipelineWidth).U
  val canEnqueue = io.enq.req.map(_.valid)
  val needCancel = WireInit(VecInit((0 until VirtualLoadQueueSize).map(i => {
    uop(i).robIdx.needFlush(io.redirect) && allocated(i)
  })))
  val lastNeedCancel = RegNext(needCancel)
  val enqCancel = io.enq.req.map(_.bits.robIdx.needFlush(io.redirect))
  val lastEnqCancel = PopCount(RegNext(VecInit(canEnqueue.zip(enqCancel).map(x => x._1 && x._2))))
  val lastCycleCancelCount = PopCount(lastNeedCancel)
  val redirectCancelCount = RegEnable(lastCycleCancelCount + lastEnqCancel, 0.U, lastCycleRedirect.valid)

  // update enqueue pointer
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept, PopCount(io.enq.req.map(_.valid)), 0.U)
  val enqPtrExtNextVec = Wire(Vec(io.enq.req.length, new LqPtr))
  val enqPtrExtNext = Wire(Vec(io.enq.req.length, new LqPtr))
  when (lastLastCycleRedirect.valid) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExtNextVec := VecInit(enqPtrExt.map(_ - redirectCancelCount))
  } .otherwise {
    enqPtrExtNextVec := VecInit(enqPtrExt.map(_ + enqNumber))
  }
  assert(!(lastCycleRedirect.valid && enqNumber =/= 0.U))

  when (isAfter(enqPtrExtNextVec(0), deqPtrNext)) {
    enqPtrExtNext := enqPtrExtNextVec
  } .otherwise {
    enqPtrExtNext := VecInit((0 until io.enq.req.length).map(i => deqPtrNext + i.U))
  }
  enqPtrExt := enqPtrExtNext

  // update dequeue pointer
  val DeqPtrMoveStride = CommitWidth
  require(DeqPtrMoveStride == CommitWidth, "DeqPtrMoveStride must be equal to CommitWidth!")
  val deqLookupVec = VecInit((0 until DeqPtrMoveStride).map(deqPtr + _.U))
  val deqLookup = VecInit(deqLookupVec.map(ptr => allocated(ptr.value) && datavalid(ptr.value) && addrvalid(ptr.value) && ptr =/= enqPtrExt(0)))
  val deqInSameRedirectCycle = VecInit(deqLookupVec.map(ptr => needCancel(ptr.value)))
  // make chisel happy
  val deqCountMask = Wire(UInt(DeqPtrMoveStride.W))
  deqCountMask := deqLookup.asUInt & (~deqInSameRedirectCycle.asUInt).asUInt
  val commitCount = PopCount(PriorityEncoderOH(~deqCountMask) - 1.U)
  val lastCommitCount = RegNext(commitCount)
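
  // Worked example (illustrative values): commitCount is the number of
  // consecutive ready entries starting at deqPtr. For deqCountMask = b0011
  // (deqPtr+0 and deqPtr+1 ready, deqPtr+2 not): ~deqCountMask = b1100,
  // PriorityEncoderOH marks the lowest clear bit of the mask -> b0100,
  // subtracting 1 yields b0011, and PopCount gives 2. A not-ready entry
  // therefore stops the count even if later entries are ready.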

  // update deqPtr
  // cycle 1: generate deqPtrNext
  // cycle 2: update deqPtr
  val deqPtrUpdateEna = lastCommitCount =/= 0.U
  deqPtrNext := deqPtr + lastCommitCount
  deqPtr := RegEnable(deqPtrNext, 0.U.asTypeOf(new LqPtr), deqPtrUpdateEna)

  io.lqDeq := RegNext(lastCommitCount)
  io.lqCancelCnt := redirectCancelCount
  io.ldWbPtr := deqPtr
  io.lqEmpty := RegNext(validCount === 0.U)

  /**
   * Enqueue at dispatch
   *
   * Currently, VirtualLoadQueue only allows enqueue when #emptyEntries >= LoadPipelineWidth
   */
  io.enq.canAccept := allowEnqueue
  for (i <- 0 until io.enq.req.length) {
    val offset = PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = io.enq.req(i).bits.lqIdx.value
    when (canEnqueue(i) && !enqCancel(i)) {
      allocated(index) := true.B
      uop(index) := io.enq.req(i).bits
      uop(index).lqIdx := lqIdx

      // init
      addrvalid(index) := false.B
      datavalid(index) := false.B

      debug_mmio(index) := false.B
      debug_paddr(index) := 0.U

      XSError(!io.enq.canAccept || !io.enq.sqCanAccept, s"must accept $i\n")
      XSError(index =/= lqIdx.value, s"must be the same entry $i\n")
    }
    io.enq.resp(i) := lqIdx
  }

  /**
   * Load commits
   *
   * When a load commits, mark it as !allocated and move deqPtr forward.
   */
  (0 until DeqPtrMoveStride).foreach(i => {
    when (commitCount > i.U) {
      allocated((deqPtr + i.U).value) := false.B
      XSError(!allocated((deqPtr + i.U).value), s"why commit invalid entry $i?\n")
    }
  })

  // misprediction recovery / exception redirect
  // invalidate lq entries using robIdx
  for (i <- 0 until VirtualLoadQueueSize) {
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")

  /**
   * Writeback load from load units
   *
   * Most load instructions write back to the regfile at the same time;
   * however,
   * (1) a ready load instruction (one that needs no replay) writes back to the ROB immediately.
   */
  for (i <- 0 until LoadPipelineWidth) {
    // most lq status fields need to be updated immediately after the load writes back to lq;
    // flag bits in lq need to be updated accurately
    io.ldin(i).ready := true.B
    val loadWbIndex = io.ldin(i).bits.uop.lqIdx.value

    when (io.ldin(i).valid) {
      val hasExceptions = ExceptionNO.selectByFu(io.ldin(i).bits.uop.exceptionVec, LduCfg).asUInt.orR
      val need_rep = io.ldin(i).bits.rep_info.need_rep

      when (!need_rep) {
        // update control flags
        addrvalid(loadWbIndex) := hasExceptions || !io.ldin(i).bits.tlbMiss
        datavalid(loadWbIndex) :=
          (if (EnableFastForward) {
            hasExceptions ||
            io.ldin(i).bits.mmio ||
            !io.ldin(i).bits.miss && // no dcache miss
            !io.ldin(i).bits.dcacheRequireReplay // do not writeback if the inst will be re-sent from rs
          } else {
            hasExceptions ||
            io.ldin(i).bits.mmio ||
            !io.ldin(i).bits.miss
          })

        // update uop when the duplicated write enables fire
        when (io.ldin(i).bits.data_wen_dup(1)) {
          uop(loadWbIndex) := io.ldin(i).bits.uop
        }
        when (io.ldin(i).bits.data_wen_dup(4)) {
          uop(loadWbIndex).debugInfo := io.ldin(i).bits.uop.debugInfo
        }
        // last connection wins: rep_info.debug overrides the debugInfo written above
        uop(loadWbIndex).debugInfo := io.ldin(i).bits.rep_info.debug

        // Debug info
        debug_mmio(loadWbIndex) := io.ldin(i).bits.mmio
        debug_paddr(loadWbIndex) := io.ldin(i).bits.paddr

        XSInfo(io.ldin(i).valid, "load hit write to lq idx %d pc 0x%x vaddr %x paddr %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.ldin(i).bits.uop.lqIdx.asUInt,
          io.ldin(i).bits.uop.pc,
          io.ldin(i).bits.vaddr,
          io.ldin(i).bits.paddr,
          io.ldin(i).bits.mask,
          io.ldin(i).bits.forwardData.asUInt,
          io.ldin(i).bits.forwardMask.asUInt,
          io.ldin(i).bits.mmio
        )
      }
    }
  }

  // perf counter
  QueuePerf(VirtualLoadQueueSize, validCount, !allowEnqueue)
  io.lqFull := !allowEnqueue
  val perfEvents: Seq[(String, UInt)] = Seq()
  generatePerfEvent()

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtr.flag, deqPtr.value)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when (flag) {
      XSDebug(false, true.B, name)
    } .otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until VirtualLoadQueueSize) {
    XSDebug(s"$i pc %x pa %x ", uop(i).pc, debug_paddr(i))
    PrintFlag(allocated(i), "v")
    PrintFlag(allocated(i) && datavalid(i), "d")
    PrintFlag(allocated(i) && addrvalid(i), "a")
    PrintFlag(allocated(i) && addrvalid(i) && datavalid(i), "w")
    XSDebug(false, true.B, "\n")
  }
  // end
}
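
// A minimal standalone elaboration sketch (hypothetical, not part of XiangShan's
// build flow; `defaultParams` is a placeholder for a concrete Parameters
// instance from the enclosing project, and the exact ChiselStage entry point
// depends on the Chisel version in use):
//
//   import circt.stage.ChiselStage
//
//   object VirtualLoadQueueMain extends App {
//     ChiselStage.emitSystemVerilog(new VirtualLoadQueue()(defaultParams))
//   }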