/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/
package xiangshan.mem

import chisel3._
import chisel3.util._
import chipsalliance.rocketchip.config._
import xiangshan._
import xiangshan.backend.rob.RobPtr
import xiangshan.cache._
import utils._
import utility._

class LoadQueueRAR(implicit p: Parameters) extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasPerfEvents
{
  val io = IO(new Bundle() {
    val redirect = Flipped(Valid(new Redirect))
    val query = Vec(LoadPipelineWidth, Flipped(new LoadViolationQueryIO))
    val release = Flipped(Valid(new Release))
    val ldWbPtr = Input(new LqPtr)
    val lqFull = Output(Bool())
  })

  println("LoadQueueRAR: size: " + LoadQueueRARSize)
  //  LoadQueueRAR entry fields:
  //  +-------+-------+-------+----------+
  //  | Valid |  Uop  | PAddr | Released |
  //  +-------+-------+-------+----------+
  //
  //  Field descriptions:
  //  Allocated : the entry is valid (allocated).
  //  MicroOp   : the micro-op of the load.
  //  PAddr     : the physical address of the load.
  //  Released  : the cacheline of this load has been released by DCache.
  //
  val allocated = RegInit(VecInit(List.fill(LoadQueueRARSize)(false.B))) // The control signals need to explicitly indicate the initial value
  val uop = Reg(Vec(LoadQueueRARSize, new MicroOp))
  val paddrModule = Module(new LqPAddrModule(
    gen = UInt(PAddrBits.W),
    numEntries = LoadQueueRARSize,
    numRead = LoadPipelineWidth,
    numWrite = LoadPipelineWidth,
    numWBank = LoadQueueNWriteBanks,
    numWDelay = 2,
    numCamPort = LoadPipelineWidth
  ))
  paddrModule.io := DontCare
  val released = RegInit(VecInit(List.fill(LoadQueueRARSize)(false.B)))

  //  Freelist: stores the indexes of free (unallocated) entries.
  //  +---+---+--------------+-----+-----+
  //  | 0 | 1 |    ......    | n-2 | n-1 |
  //  +---+---+--------------+-----+-----+
  val freeList = Module(new FreeList(
    size = LoadQueueRARSize,
    allocWidth = LoadPipelineWidth,
    freeWidth = 4,
    moduleName = "LoadQueueRAR freelist"
  ))
  freeList.io := DontCare

  //  Real allocation happens at load_s2.
  //  The PAddr write takes 2 cycles, so the release signal is kept for one extra cycle
  //  (release2Cycle) so that an enqueuing load can still catch the release.
  val release1Cycle = io.release
  val release2Cycle = RegNext(io.release)
  val release2Cycle_dup_lsu = RegNext(io.release)

  //  LoadQueueRAR enqueue condition:
  //  There are still uncompleted load instructions before the current load instruction
  //  (here "completed" means the load has got its data or has raised an exception).
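  //  A minimal sketch of the per-port enqueue decision implemented just below (this is only
  //  a restatement of the following code, not extra logic; names follow that code):
  //    needEnqueue(w) = req(w).valid                                // query is valid
  //                  && isAfter(req(w).uop.lqIdx, io.ldWbPtr)       // some older load has not written back yet
  //                  && !req(w).uop.robIdx.needFlush(io.redirect)   // not cancelled by a redirect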
  val canEnqueue = io.query.map(_.req.valid)
  val cancelEnqueue = io.query.map(_.req.bits.uop.robIdx.needFlush(io.redirect))
  val hasNotWritebackedLoad = io.query.map(_.req.bits.uop.lqIdx).map(lqIdx => isAfter(lqIdx, io.ldWbPtr))
  val needEnqueue = canEnqueue.zip(hasNotWritebackedLoad).zip(cancelEnqueue).map { case ((v, r), c) => v && r && !c }

  //  Allocation logic
  val enqValidVec = Wire(Vec(LoadPipelineWidth, Bool()))
  val enqIndexVec = Wire(Vec(LoadPipelineWidth, UInt()))

  for ((enq, w) <- io.query.map(_.req).zipWithIndex) {
    paddrModule.io.wen(w) := false.B
    freeList.io.doAllocate(w) := false.B

    freeList.io.allocateReq(w) := needEnqueue(w)

    //  Allocate ready
    enqValidVec(w) := freeList.io.canAllocate(w)
    enqIndexVec(w) := freeList.io.allocateSlot(w)
    enq.ready := Mux(needEnqueue(w), enqValidVec(w), true.B)

    val enqIndex = enqIndexVec(w)
    when (needEnqueue(w) && enq.ready) {
      val debug_robIdx = enq.bits.uop.robIdx.asUInt
      XSError(allocated(enqIndex), p"LoadQueueRAR: You can not write to a valid entry! check: ldu $w, robIdx $debug_robIdx")

      freeList.io.doAllocate(w) := true.B

      //  Allocate a new entry
      allocated(enqIndex) := true.B

      //  Write paddr
      paddrModule.io.wen(w) := true.B
      paddrModule.io.waddr(w) := enqIndex
      paddrModule.io.wdata(w) := enq.bits.paddr

      //  Fill entry info
      uop(enqIndex) := enq.bits.uop
      released(enqIndex) :=
        enq.bits.datavalid &&
        (release2Cycle.valid &&
        enq.bits.paddr(PAddrBits-1, DCacheLineOffset) === release2Cycle.bits.paddr(PAddrBits-1, DCacheLineOffset) ||
        release1Cycle.valid &&
        enq.bits.paddr(PAddrBits-1, DCacheLineOffset) === release1Cycle.bits.paddr(PAddrBits-1, DCacheLineOffset))
    }
  }

  //  LoadQueueRAR deallocation
  val freeMaskVec = Wire(Vec(LoadQueueRARSize, Bool()))

  // init
  freeMaskVec.map(e => e := false.B)

  // When the loads older than the current load have all written back (or the entry is
  // flushed by a redirect), the entry can be deallocated.
  for (i <- 0 until LoadQueueRARSize) {
    val deqNotBlock = !isBefore(io.ldWbPtr, uop(i).lqIdx)
    val needFlush = uop(i).robIdx.needFlush(io.redirect)

    when (allocated(i) && (deqNotBlock || needFlush)) {
      allocated(i) := false.B
      freeMaskVec(i) := true.B
    }
  }

  // If the load needs to be replayed, release (free) the entry it allocated last cycle.
  val lastCanAccept = RegNext(VecInit(needEnqueue.zip(enqValidVec).map(x => x._1 && x._2)))
  val lastAllocIndex = RegNext(enqIndexVec)

  for ((release, w) <- io.query.map(_.release).zipWithIndex) {
    val releaseValid = release && lastCanAccept(w)
    val releaseIndex = lastAllocIndex(w)

    when (allocated(releaseIndex) && releaseValid) {
      allocated(releaseIndex) := false.B
      freeMaskVec(releaseIndex) := true.B
    }
  }

  freeList.io.free := freeMaskVec.asUInt

  //  LoadQueueRAR query
  //  Load-to-load violation check conditions:
  //  1. The physical address matches through a CAM port.
  //  2. The released flag of the matching entry is set.
  //  3. The matching entry is younger than the current load instruction.
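  //  A sketch of the check computed per query port w below (the RegNext stage matches the
  //  one-cycle CAM/robIdx comparison latency; written as pseudo-Chisel for reference only):
  //    matchMask(i)       = allocated(i) && paddrMatch(w)(i) && isAfter(uop(i).robIdx, query(w).uop.robIdx)
  //    replayFromFetch(w) = (matchMask & released).orR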
  val ldLdViolation = Wire(Vec(LoadPipelineWidth, Bool()))
  val allocatedUInt = RegNext(allocated.asUInt)
  for ((query, w) <- io.query.zipWithIndex) {
    ldLdViolation(w) := false.B
    paddrModule.io.releaseViolationMdata(w) := query.req.bits.paddr

    query.resp.valid := RegNext(query.req.valid)
    // Generate the real violation mask
    val robIdxMask = VecInit(uop.map(_.robIdx).map(isAfter(_, query.req.bits.uop.robIdx)))
    val matchMask = allocatedUInt &
                    RegNext(paddrModule.io.releaseViolationMmask(w).asUInt) &
                    RegNext(robIdxMask.asUInt)
    //  Load-to-load violation check result
    val ldLdViolationMask = WireInit(matchMask & RegNext(released.asUInt))
    ldLdViolationMask.suggestName("ldLdViolationMask_" + w)
    query.resp.bits.replayFromFetch := ldLdViolationMask.orR
  }

  // When io.release.valid (release1Cycle.valid) is set, the last ld-ld paddr CAM port is
  // used to update the released flag within 1 cycle.
  val releaseVioMask = Reg(Vec(LoadQueueRARSize, Bool()))
  when (release1Cycle.valid) {
    paddrModule.io.releaseMdata.takeRight(1)(0) := release1Cycle.bits.paddr
  }

  (0 until LoadQueueRARSize).map(i => {
    when (RegNext(paddrModule.io.releaseMmask.takeRight(1)(0)(i) && allocated(i) && release1Cycle.valid)) {
      // Note: if a load has missed in dcache and is waiting for a refill in the load queue,
      // its released flag still needs to be set to true if the address matches.
      released(i) := true.B
    }
  })

  io.lqFull := freeList.io.empty

  // perf cnt
  val canEnqCount = PopCount(io.query.map(_.req.fire))
  val validCount = freeList.io.validCount
  val allowEnqueue = validCount <= (LoadQueueRARSize - LoadPipelineWidth).U
  val ldLdViolationCount = PopCount(io.query.map(_.resp).map(resp => resp.valid && resp.bits.replayFromFetch))

  QueuePerf(LoadQueueRARSize, validCount, !allowEnqueue)
  XSPerfAccumulate("enq", canEnqCount)
  XSPerfAccumulate("ld_ld_violation", ldLdViolationCount)
  val perfEvents: Seq[(String, UInt)] = Seq(
    ("enq", canEnqCount),
    ("ld_ld_violation", ldLdViolationCount)
  )
  generatePerfEvent()
  // End
}
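
// A minimal wiring sketch for reference (assumptions: a parent LSQ-style wrapper exposes the
// signals used below; io.ldu, io.release, ldWbPtr and lqFull are illustrative names, not the
// actual XiangShan wrapper ports):
//
//   val loadQueueRAR = Module(new LoadQueueRAR)
//   loadQueueRAR.io.redirect := io.redirect
//   for (w <- 0 until LoadPipelineWidth) {
//     loadQueueRAR.io.query(w) <> io.ldu(w).loadViolationQuery
//   }
//   loadQueueRAR.io.release := io.release
//   loadQueueRAR.io.ldWbPtr := ldWbPtr
//   lqFull := loadQueueRAR.io.lqFull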