/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/
package xiangshan.mem

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import xiangshan._
import xiangshan.backend.rob.RobPtr
import xiangshan.cache._
import utils._
import utility._
import xiangshan.backend.Bundles.DynInst

class LoadQueueRAR(implicit p: Parameters) extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasPerfEvents
{
  val io = IO(new Bundle() {
    // control
    val redirect = Flipped(Valid(new Redirect))
    val vecFeedback = Vec(VecLoadPipelineWidth, Flipped(ValidIO(new FeedbackToLsqIO)))

    // violation query
    val query = Vec(LoadPipelineWidth, Flipped(new LoadNukeQueryIO))

    // release cacheline
    val release = Flipped(Valid(new Release))

    // from VirtualLoadQueue
    val ldWbPtr = Input(new LqPtr)

    // global
    val lqFull = Output(Bool())
  })

  println("LoadQueueRAR: size: " + LoadQueueRARSize)
  //  LoadQueueRAR fields
  //  +-----------+-----+-------+----------+
  //  | Allocated | Uop | PAddr | Released |
  //  +-----------+-----+-------+----------+
  //
  //  Field descriptions:
  //  Allocated : the entry is valid.
  //  Uop       : micro-op of the tracked load.
  //  PAddr     : physical address of the load.
  //  Released  : the cacheline this load read has been released by the DCache.
  //
  val allocated = RegInit(VecInit(List.fill(LoadQueueRARSize)(false.B))) // control signals need an explicit initial value
  val uop = Reg(Vec(LoadQueueRARSize, new DynInst))
  val paddrModule = Module(new LqPAddrModule(
    gen = UInt(PAddrBits.W),
    numEntries = LoadQueueRARSize,
    numRead = LoadPipelineWidth,
    numWrite = LoadPipelineWidth,
    numWBank = LoadQueueNWriteBanks,
    numWDelay = 2,
    numCamPort = LoadPipelineWidth
  ))
  paddrModule.io := DontCare
  val released = RegInit(VecInit(List.fill(LoadQueueRARSize)(false.B)))
  val bypassPAddr = Reg(Vec(LoadPipelineWidth, UInt(PAddrBits.W)))

  // freelist: stores the indexes of currently free entries.
  // +---+---+--------------+-----+-----+
  // | 0 | 1 |    ......    | n-2 | n-1 |
  // +---+---+--------------+-----+-----+
  val freeList = Module(new FreeList(
    size = LoadQueueRARSize,
    allocWidth = LoadPipelineWidth,
    freeWidth = 4,
    enablePreAlloc = true,
    moduleName = "LoadQueueRAR freelist"
  ))
  freeList.io := DontCare

  // Real allocation happens at load_s2.
  // The PAddr write takes 2 cycles, so the release signal is also held for one
  // extra cycle to make sure an enqueuing load can still catch the release.
  val release1Cycle = io.release
  val release2Cycle = RegNext(io.release)
  val release2Cycle_dup_lsu = RegNext(io.release)

  // LoadQueueRAR enqueue condition:
  // there are still uncompleted load instructions older than the current load
  // (a load counts as completed once it has received its data or an exception).
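  //
  // Why this queue exists (a summary of the logic below): if a younger load to
  // address A has already returned data, and the cacheline holding A is then
  // released (evicted or probed away, possibly because another core wrote it),
  // an older load to A that executes later may read a newer value than the
  // younger load did, breaking load-to-load ordering. Tracking in-flight loads
  // here lets the older load detect the match and re-execute from fetch
  // (rep_frm_fetch).
  //
  // Illustrative example (numbers are made up): with ldWbPtr = 5, an incoming
  // load with lqIdx = 8 still has loads 5..7 pending in front of it, so
  // isAfter(8, 5) holds and the load is enqueued; a load with lqIdx = 4 has
  // already been passed by the writeback pointer and needs no RAR tracking.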
"not completed" means that load instruction get the data or exception). 102 val canEnqueue = io.query.map(_.req.valid) 103 val cancelEnqueue = io.query.map(_.req.bits.uop.robIdx.needFlush(io.redirect)) 104 val hasNotWritebackedLoad = io.query.map(_.req.bits.uop.lqIdx).map(lqIdx => isAfter(lqIdx, io.ldWbPtr)) 105 val needEnqueue = canEnqueue.zip(hasNotWritebackedLoad).zip(cancelEnqueue).map { case ((v, r), c) => v && r && !c } 106 107 // Allocate logic 108 val acceptedVec = Wire(Vec(LoadPipelineWidth, Bool())) 109 val enqIndexVec = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueRARSize).W))) 110 111 for ((enq, w) <- io.query.map(_.req).zipWithIndex) { 112 acceptedVec(w) := false.B 113 paddrModule.io.wen(w) := false.B 114 freeList.io.doAllocate(w) := false.B 115 116 freeList.io.allocateReq(w) := true.B 117 118 // Allocate ready 119 val offset = PopCount(needEnqueue.take(w)) 120 val canAccept = freeList.io.canAllocate(offset) 121 val enqIndex = freeList.io.allocateSlot(offset) 122 enq.ready := Mux(needEnqueue(w), canAccept, true.B) 123 124 enqIndexVec(w) := enqIndex 125 when (needEnqueue(w) && enq.ready) { 126 acceptedVec(w) := true.B 127 128 val debug_robIdx = enq.bits.uop.robIdx.asUInt 129 XSError(allocated(enqIndex), p"LoadQueueRAR: You can not write an valid entry! check: ldu $w, robIdx $debug_robIdx") 130 131 freeList.io.doAllocate(w) := true.B 132 133 // Allocate new entry 134 allocated(enqIndex) := true.B 135 136 // Write paddr 137 paddrModule.io.wen(w) := true.B 138 paddrModule.io.waddr(w) := enqIndex 139 paddrModule.io.wdata(w) := enq.bits.paddr 140 bypassPAddr(w) := enq.bits.paddr 141 142 // Fill info 143 uop(enqIndex) := enq.bits.uop 144 released(enqIndex) := 145 enq.bits.data_valid && 146 (release2Cycle.valid && 147 enq.bits.paddr(PAddrBits-1, DCacheLineOffset) === release2Cycle.bits.paddr(PAddrBits-1, DCacheLineOffset) || 148 release1Cycle.valid && 149 enq.bits.paddr(PAddrBits-1, DCacheLineOffset) === release1Cycle.bits.paddr(PAddrBits-1, DCacheLineOffset)) 150 } 151 } 152 153 // LoadQueueRAR deallocate 154 val freeMaskVec = Wire(Vec(LoadQueueRARSize, Bool())) 155 156 // init 157 freeMaskVec.map(e => e := false.B) 158 159 // when the loads that "older than" current load were writebacked, 160 // current load will be released. 
  val vecLdCanceltmp = Wire(Vec(LoadQueueRARSize, Vec(VecLoadPipelineWidth, Bool())))
  val vecLdCancel = Wire(Vec(LoadQueueRARSize, Bool()))
  for (i <- 0 until LoadQueueRARSize) {
    val deqNotBlock = !isBefore(io.ldWbPtr, uop(i).lqIdx)
    val needFlush = uop(i).robIdx.needFlush(io.redirect)
    val fbk = io.vecFeedback
    for (j <- 0 until VecLoadPipelineWidth) {
      vecLdCanceltmp(i)(j) := allocated(i) && fbk(j).valid && fbk(j).bits.isFlush && uop(i).robIdx === fbk(j).bits.robidx && uop(i).uopIdx === fbk(j).bits.uopidx
    }
    vecLdCancel(i) := vecLdCanceltmp(i).reduce(_ || _)

    when (allocated(i) && (deqNotBlock || needFlush || vecLdCancel(i))) {
      allocated(i) := false.B
      freeMaskVec(i) := true.B
    }
  }

  // Revoke the entry allocated in the previous cycle if the load needs replay.
  val lastCanAccept = RegNext(acceptedVec)
  val lastAllocIndex = RegNext(enqIndexVec)

  for ((revoke, w) <- io.query.map(_.revoke).zipWithIndex) {
    val revokeValid = revoke && lastCanAccept(w)
    val revokeIndex = lastAllocIndex(w)

    when (allocated(revokeIndex) && revokeValid) {
      allocated(revokeIndex) := false.B
      freeMaskVec(revokeIndex) := true.B
    }
  }

  freeList.io.free := freeMaskVec.asUInt

  // LoadQueueRAR query
  // A load-to-load violation is reported when, for some entry:
  //   1. the physical addresses match (CAM port);
  //   2. the entry's released flag is set;
  //   3. the entry is younger than the querying load.
  val ldLdViolation = Wire(Vec(LoadPipelineWidth, Bool()))
  val allocatedUInt = RegNext(allocated.asUInt)
  for ((query, w) <- io.query.zipWithIndex) {
    ldLdViolation(w) := false.B
    paddrModule.io.releaseViolationMdata(w) := query.req.bits.paddr

    query.resp.valid := RegNext(query.req.valid)
    // Generate the real violation mask
    val robIdxMask = VecInit(uop.map(_.robIdx).map(isAfter(_, query.req.bits.uop.robIdx)))
    val matchMask = (0 until LoadQueueRARSize).map(i => {
      RegNext(allocated(i) &&
        paddrModule.io.releaseViolationMmask(w)(i) &&
        robIdxMask(i) &&
        released(i))
    })
    // Load-to-load violation check result
    val ldLdViolationMask = VecInit(matchMask)
    ldLdViolationMask.suggestName("ldLdViolationMask_" + w)
    query.resp.bits.rep_frm_fetch := ParallelORR(ldLdViolationMask)
  }

  // When io.release.valid (release1Cycle.valid), the last ld-ld paddr CAM port
  // is borrowed to update the released flags within 1 cycle.
  val releaseVioMask = Reg(Vec(LoadQueueRARSize, Bool()))
  when (release1Cycle.valid) {
    paddrModule.io.releaseMdata.takeRight(1)(0) := release1Cycle.bits.paddr
  }
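  // Note on bypassPAddr (used below): the PAddr RAM has a 2-cycle write latency
  // (numWDelay = 2), so a release arriving while a just-enqueued paddr is still
  // in flight would be missed by the CAM. The per-pipeline bypass registers
  // close this window by comparing the release address against the paddr
  // enqueued in the previous cycle.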
  val lastAllocIndexOH = lastAllocIndex.map(UIntToOH(_))
  val lastReleasePAddrMatch = VecInit((0 until LoadPipelineWidth).map(i => {
    bypassPAddr(i)(PAddrBits-1, DCacheLineOffset) === release1Cycle.bits.paddr(PAddrBits-1, DCacheLineOffset)
  }))
  (0 until LoadQueueRARSize).foreach(i => {
    val bypassMatch = VecInit((0 until LoadPipelineWidth).map(j =>
      lastCanAccept(j) && lastAllocIndexOH(j)(i) && lastReleasePAddrMatch(j)
    )).asUInt.orR
    when (RegNext((paddrModule.io.releaseMmask.takeRight(1)(0)(i) || bypassMatch) && allocated(i) && release1Cycle.valid)) {
      // Note: if a load missed in the DCache and is waiting for refill in the
      // load queue, its released flag still needs to be set on an address match.
      released(i) := true.B
    }
  })

  // The queue is full when the freelist has no free entries left.
  io.lqFull := freeList.io.empty

  // perf counters
  val canEnqCount = PopCount(io.query.map(_.req.fire))
  val validCount = freeList.io.validCount
  val allowEnqueue = validCount <= (LoadQueueRARSize - LoadPipelineWidth).U
  val ldLdViolationCount = PopCount(io.query.map(_.resp).map(resp => resp.valid && resp.bits.rep_frm_fetch))

  QueuePerf(LoadQueueRARSize, validCount, !allowEnqueue)
  XSPerfAccumulate("enq", canEnqCount)
  XSPerfAccumulate("ld_ld_violation", ldLdViolationCount)
  val perfEvents: Seq[(String, UInt)] = Seq(
    ("enq", canEnqCount),
    ("ld_ld_violation", ldLdViolationCount)
  )
  generatePerfEvent()
  // End
}
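// Query handshake as seen from a load unit (an illustrative summary of the IO
// contract implemented above; stage names follow the load_s2 comment):
//   s2: drive query.req.valid with the load's uop, paddr and data_valid;
//       if query.req.ready is low, the queue is full and the load must replay.
//   s3: query.resp.valid is RegNext(query.req.valid); when
//       query.resp.bits.rep_frm_fetch is set, a younger load has already read
//       a since-released line, so the pipeline must re-execute from fetch.
//   next cycle: assert query.revoke to undo the previous cycle's allocation
//       if the load turns out to need replay.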