/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/
package xiangshan.mem

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import xiangshan._
import xiangshan.backend.rob.RobPtr
import xiangshan.cache._
import utils._
import utility._
import xiangshan.backend.Bundles.DynInst

class LoadQueueRAR(implicit p: Parameters) extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasPerfEvents
{
  val io = IO(new Bundle() {
    // control
    val redirect = Flipped(Valid(new Redirect))

    // violation query
    val query = Vec(LoadPipelineWidth, Flipped(new LoadNukeQueryIO))

    // release cacheline
    val release = Flipped(Valid(new Release))

    // from VirtualLoadQueue
    val ldWbPtr = Input(new LqPtr)

    // global
    val lqFull = Output(Bool())
  })

  private val PartialPAddrStride: Int = 6
  private val PartialPAddrBits: Int = 16
  private val PartialPAddrLowBits: Int = (PartialPAddrBits - PartialPAddrStride) / 2 // avoid overlap
  private val PartialPAddrHighBits: Int = PartialPAddrBits - PartialPAddrLowBits
  private def boundary(x: Int, h: Int) = if (x < h) Some(x) else None
  private def lowMapping = (0 until PartialPAddrLowBits).map(i => Seq(
      boundary(PartialPAddrStride + i  , PartialPAddrBits),
      boundary(PartialPAddrBits - i - 1, PartialPAddrBits)
    )
  )
  private def highMapping = (0 until PartialPAddrHighBits).map(i => Seq(
      boundary(i + PartialPAddrStride     , PAddrBits),
      boundary(i + PartialPAddrStride + 11, PAddrBits),
      boundary(i + PartialPAddrStride + 22, PAddrBits),
      boundary(i + PartialPAddrStride + 33, PAddrBits)
    )
  )
  // Fold the physical address above the cacheline-offset bits into a
  // PartialPAddrBits-wide hash by XOR-ing the bit positions selected above.
  private def genPartialPAddr(paddr: UInt) = {
    val ppaddr_low = Wire(Vec(PartialPAddrLowBits, Bool()))
    ppaddr_low.zip(lowMapping).foreach {
      case (bit, mapping) =>
        bit := mapping.filter(_.isDefined).map(x => paddr(x.get)).reduce(_^_)
    }

    val ppaddr_high = Wire(Vec(PartialPAddrHighBits, Bool()))
    ppaddr_high.zip(highMapping).foreach {
      case (bit, mapping) =>
        bit := mapping.filter(_.isDefined).map(x => paddr(x.get)).reduce(_^_)
    }
    Cat(ppaddr_high.asUInt, ppaddr_low.asUInt)
  }

  println("LoadQueueRAR: size: " + LoadQueueRARSize)
  //  LoadQueueRAR field
  //  +-------+-------+-------+----------+
  //  | Valid |  Uop  | PAddr | Released |
  //  +-------+-------+-------+----------+
  //
  //  Field descriptions:
  //  Valid    : the entry has been allocated.
  //  Uop      : micro-op of the enqueued load.
  //  PAddr    : physical address (stored as a folded partial address).
  //  Released : the DCache has released this load's cacheline.
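  //
  //  A worked sketch of the fold used for the PAddr field (an informal reading
  //  of genPartialPAddr above, not an additional specification). With
  //  PartialPAddrStride = 6 and PartialPAddrBits = 16:
  //    partial( 4, 0) = paddr(10, 6) ^ Reverse(paddr(15, 11))
  //    partial(15, 5) = paddr(16, 6) ^ paddr(27, 17) ^ paddr(38, 28) ^ paddr(49, 39)
  //  where taps at or above PAddrBits are dropped by boundary(). The fold is
  //  deterministic, so a true address match always hits in the CAM; two
  //  different cachelines may alias to the same partial address, which can only
  //  cause a spurious rep_frm_fetch replay, never a missed violation.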
  val allocated = RegInit(VecInit(List.fill(LoadQueueRARSize)(false.B))) // control signals need an explicit initial value
  val uop = Reg(Vec(LoadQueueRARSize, new DynInst))
  val paddrModule = Module(new LqPAddrModule(
    gen = UInt(PartialPAddrBits.W),
    numEntries = LoadQueueRARSize,
    numRead = LoadPipelineWidth,
    numWrite = LoadPipelineWidth,
    numWBank = LoadQueueNWriteBanks,
    numWDelay = 2,
    numCamPort = LoadPipelineWidth
  ))
  paddrModule.io := DontCare
  val released = RegInit(VecInit(List.fill(LoadQueueRARSize)(false.B)))

  // freeList: stores the indexes of free entries.
  //  +---+---+--------------+-----+-----+
  //  | 0 | 1 |    ......    | n-2 | n-1 |
  //  +---+---+--------------+-----+-----+
  val freeList = Module(new FreeList(
    size = LoadQueueRARSize,
    allocWidth = LoadPipelineWidth,
    freeWidth = 4,
    enablePreAlloc = true,
    moduleName = "LoadQueueRAR freelist"
  ))
  freeList.io := DontCare

  // Real allocation happens in load_s2.
  // The paddr write takes 2 cycles, so the release signal is delayed by 1 cycle
  // so that an enqueuing load can still catch the release.
  val release1Cycle = io.release
  // val release2Cycle = RegNext(io.release)
  // val release2Cycle_dup_lsu = RegNext(io.release)
  val release2Cycle = RegEnable(io.release, io.release.valid)
  release2Cycle.valid := RegNext(io.release.valid)
  //val release2Cycle_dup_lsu = RegEnable(io.release, io.release.valid)

  // LoadQueueRAR enqueue condition:
  // There are still uncompleted load instructions older than the current load
  // ("completed" means the load has received its data or raised an exception).
  val canEnqueue = io.query.map(_.req.valid)
  val cancelEnqueue = io.query.map(_.req.bits.uop.robIdx.needFlush(io.redirect))
  val hasNotWritebackedLoad = io.query.map(_.req.bits.uop.lqIdx).map(lqIdx => isAfter(lqIdx, io.ldWbPtr))
  val needEnqueue = canEnqueue.zip(hasNotWritebackedLoad).zip(cancelEnqueue).map { case ((v, r), c) => v && r && !c }

  // Allocate logic
  val acceptedVec = Wire(Vec(LoadPipelineWidth, Bool()))
  val enqIndexVec = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueRARSize).W)))

  for ((enq, w) <- io.query.map(_.req).zipWithIndex) {
    acceptedVec(w) := false.B
    paddrModule.io.wen(w) := false.B
    freeList.io.doAllocate(w) := false.B

    freeList.io.allocateReq(w) := true.B

    // Allocate ready
    val offset = PopCount(needEnqueue.take(w))
    val canAccept = freeList.io.canAllocate(offset)
    val enqIndex = freeList.io.allocateSlot(offset)
    enq.ready := Mux(needEnqueue(w), canAccept, true.B)

    enqIndexVec(w) := enqIndex
    when (needEnqueue(w) && enq.ready) {
      acceptedVec(w) := true.B

      freeList.io.doAllocate(w) := true.B
      // Allocate new entry
      allocated(enqIndex) := true.B

      // Write paddr
      paddrModule.io.wen(w) := true.B
      paddrModule.io.waddr(w) := enqIndex
      paddrModule.io.wdata(w) := genPartialPAddr(enq.bits.paddr)

      // Fill info
      uop(enqIndex) := enq.bits.uop
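      // A hedged note on the "released" initialisation below (it adds no
      // behaviour of its own): the paddr write into paddrModule takes two
      // cycles, so a release firing while this load enqueues cannot be caught
      // by the delayed CAM update path; the entry is therefore marked released
      // up front when its data is already valid and its cacheline matches a
      // release seen in this or the previous cycle.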
      // NC accesses are uncacheable and are never explicitly released by the
      // DCache, so NC requests are not allowed to have RAR: mark them as
      // released at allocation time.
      released(enqIndex) := enq.bits.is_nc || (
        enq.bits.data_valid &&
        (release2Cycle.valid &&
        enq.bits.paddr(PAddrBits-1, DCacheLineOffset) === release2Cycle.bits.paddr(PAddrBits-1, DCacheLineOffset) ||
        release1Cycle.valid &&
        enq.bits.paddr(PAddrBits-1, DCacheLineOffset) === release1Cycle.bits.paddr(PAddrBits-1, DCacheLineOffset))
      )
    }
    val debug_robIdx = enq.bits.uop.robIdx.asUInt
    XSError(
      needEnqueue(w) && enq.ready && allocated(enqIndex),
      p"LoadQueueRAR: You can not write a valid entry! check: ldu $w, robIdx $debug_robIdx")
  }

  // LoadQueueRAR deallocate
  val freeMaskVec = Wire(Vec(LoadQueueRARSize, Bool()))

  // init
  freeMaskVec.map(e => e := false.B)

  // An entry can be freed once every load older than it has been written back
  // (no older load can still query it), or once the entry is flushed.
  for (i <- 0 until LoadQueueRARSize) {
    val deqNotBlock = !isBefore(io.ldWbPtr, uop(i).lqIdx)
    val needFlush = uop(i).robIdx.needFlush(io.redirect)

    when (allocated(i) && (deqNotBlock || needFlush)) {
      allocated(i) := false.B
      freeMaskVec(i) := true.B
    }
  }

  // If a load needs to replay, revoke its entry.
  val lastCanAccept = GatedRegNext(acceptedVec)
  val lastAllocIndex = GatedRegNext(enqIndexVec)

  for ((revoke, w) <- io.query.map(_.revoke).zipWithIndex) {
    val revokeValid = revoke && lastCanAccept(w)
    val revokeIndex = lastAllocIndex(w)

    when (allocated(revokeIndex) && revokeValid) {
      allocated(revokeIndex) := false.B
      freeMaskVec(revokeIndex) := true.B
    }
  }

  freeList.io.free := freeMaskVec.asUInt

  // LoadQueueRAR query
  // Load-to-load violation check conditions:
  // 1. The physical address matches (by CAM port).
  // 2. The released flag is set (release observed, or NC with valid data).
  // 3. The entry is younger than the querying load instruction.
  val ldLdViolation = Wire(Vec(LoadPipelineWidth, Bool()))
  //val allocatedUInt = RegNext(allocated.asUInt)
  for ((query, w) <- io.query.zipWithIndex) {
    ldLdViolation(w) := false.B
    paddrModule.io.releaseViolationMdata(w) := genPartialPAddr(query.req.bits.paddr)

    query.resp.valid := RegNext(query.req.valid)
    // Generate real violation mask
    val robIdxMask = VecInit(uop.map(_.robIdx).map(isAfter(_, query.req.bits.uop.robIdx)))
    val matchMaskReg = Wire(Vec(LoadQueueRARSize, Bool()))
    for (i <- 0 until LoadQueueRARSize) {
      matchMaskReg(i) := (allocated(i) &&
        paddrModule.io.releaseViolationMmask(w)(i) &&
        robIdxMask(i) &&
        released(i))
    }
    val matchMask = GatedValidRegNext(matchMaskReg)
    // Load-to-load violation check result
    val ldLdViolationMask = matchMask
    ldLdViolationMask.suggestName("ldLdViolationMask_" + w)
    query.resp.bits.rep_frm_fetch := ParallelORR(ldLdViolationMask)
  }

  // When io.release.valid (release1Cycle.valid) fires, the last ld-ld paddr CAM
  // port is used to update the released flags one cycle later.
  val releaseVioMask = Reg(Vec(LoadQueueRARSize, Bool()))
  when (release1Cycle.valid) {
    paddrModule.io.releaseMdata.takeRight(1)(0) := genPartialPAddr(release1Cycle.bits.paddr)
  }

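  // Timing sketch (an informal note, not additional behaviour): the CAM lookup
  // is driven in the same cycle a release fires, and the per-entry released
  // flags below are set one cycle later; loads that enqueue inside this window
  // are covered instead by the release1Cycle/release2Cycle match performed at
  // allocation time above.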
  (0 until LoadQueueRARSize).map(i => {
    when (RegNext((paddrModule.io.releaseMmask.takeRight(1)(0)(i)) && allocated(i) && release1Cycle.valid)) {
      // Note: if a load has missed in the DCache and is waiting for a refill in
      // the load queue, its released flag still needs to be set when the
      // address matches.
      released(i) := true.B
    }
  })

  io.lqFull := freeList.io.empty

  // perf cnt
  val canEnqCount = PopCount(io.query.map(_.req.fire))
  val validCount = freeList.io.validCount
  val allowEnqueue = validCount <= (LoadQueueRARSize - LoadPipelineWidth).U
  val ldLdViolationCount = PopCount(io.query.map(_.resp).map(resp => resp.valid && resp.bits.rep_frm_fetch))

  QueuePerf(LoadQueueRARSize, validCount, !allowEnqueue)
  XSPerfAccumulate("enq", canEnqCount)
  XSPerfAccumulate("ld_ld_violation", ldLdViolationCount)
  val perfEvents: Seq[(String, UInt)] = Seq(
    ("enq", canEnqCount),
    ("ld_ld_violation", ldLdViolationCount)
  )
  generatePerfEvent()
  // End
}