/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import xiangshan._
import xiangshan.backend.rob.RobPtr
import xiangshan.cache._
import xiangshan.frontend.FtqPtr
import xiangshan.mem.mdp._
import utils._
import utility._
import xiangshan.backend.Bundles.DynInst

class LoadQueueRAW(implicit p: Parameters) extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasPerfEvents
{
  val io = IO(new Bundle() {
    // control
    val redirect = Flipped(ValidIO(new Redirect))
    val vecFeedback = Vec(VecLoadPipelineWidth, Flipped(ValidIO(new FeedbackToLsqIO)))

    // violation query
    val query = Vec(LoadPipelineWidth, Flipped(new LoadNukeQueryIO))

    // from store unit s1
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))

    // global rollback flush
    val rollback = Vec(StorePipelineWidth, Output(Valid(new Redirect)))

    // to LoadQueueReplay
    val stAddrReadySqPtr = Input(new SqPtr)
    val stIssuePtr = Input(new SqPtr)
    val lqFull = Output(Bool())
  })

  private def PartialPAddrWidth: Int = 24
  private def genPartialPAddr(paddr: UInt) = {
    paddr(DCacheVWordOffset + PartialPAddrWidth - 1, DCacheVWordOffset)
  }

  println("LoadQueueRAW: size " + LoadQueueRAWSize)
  //  LoadQueueRAW fields
  //  +-------+--------+-------+-------+-----------+
  //  | Valid |  uop   | PAddr | Mask  | Datavalid |
  //  +-------+--------+-------+-------+-----------+
  //
  //  Field descriptions:
  //  Allocated   : the entry has been allocated
  //  MicroOp     : the instruction's micro-op
  //  PAddr       : physical address
  //  Mask        : data mask
  //  Datavalid   : data is valid
  //
  val allocated = RegInit(VecInit(List.fill(LoadQueueRAWSize)(false.B))) // control signals need an explicit initial value
  val uop = Reg(Vec(LoadQueueRAWSize, new DynInst))
  val paddrModule = Module(new LqPAddrModule(
    gen = UInt(PartialPAddrWidth.W),
    numEntries = LoadQueueRAWSize,
    numRead = LoadPipelineWidth,
    numWrite = LoadPipelineWidth,
    numWBank = LoadQueueNWriteBanks,
    numWDelay = 2,
    numCamPort = StorePipelineWidth
  ))
  paddrModule.io := DontCare
  val maskModule = Module(new LqMaskModule(
    gen = UInt((VLEN/8).W),
    numEntries = LoadQueueRAWSize,
    numRead = LoadPipelineWidth,
    numWrite = LoadPipelineWidth,
    numWBank = LoadQueueNWriteBanks,
    numWDelay = 2,
    numCamPort = StorePipelineWidth
  ))
  maskModule.io := DontCare
  val datavalid = RegInit(VecInit(List.fill(LoadQueueRAWSize)(false.B)))

  // freelist: stores the indices of free entries.
  //  +---+---+--------------+-----+-----+
  //  | 0 | 1 |    ......    | n-2 | n-1 |
  //  +---+---+--------------+-----+-----+
  val freeList = Module(new FreeList(
    size = LoadQueueRAWSize,
    allocWidth = LoadPipelineWidth,
    freeWidth = 4,
    enablePreAlloc = true,
    moduleName = "LoadQueueRAW freelist"
  ))
  freeList.io := DontCare

  // LoadQueueRAW enqueue
  val canEnqueue = io.query.map(_.req.valid)
  val cancelEnqueue = io.query.map(_.req.bits.uop.robIdx.needFlush(io.redirect))
  val allAddrCheck = io.stIssuePtr === io.stAddrReadySqPtr
  val hasAddrInvalidStore = io.query.map(_.req.bits.uop.sqIdx).map(sqIdx => {
    Mux(!allAddrCheck, isBefore(io.stAddrReadySqPtr, sqIdx), false.B)
  })
  // A load allocates an entry only when some older store still has an
  // unresolved address; otherwise all RAW checks against it have already
  // completed in the load pipeline.
  val needEnqueue = canEnqueue.zip(hasAddrInvalidStore).zip(cancelEnqueue).map { case ((v, r), c) => v && r && !c }

  // Allocate logic
  val acceptedVec = Wire(Vec(LoadPipelineWidth, Bool()))
  val enqIndexVec = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueRAWSize).W)))

  // Enqueue
  for ((enq, w) <- io.query.map(_.req).zipWithIndex) {
    acceptedVec(w) := false.B
    paddrModule.io.wen(w) := false.B
    maskModule.io.wen(w) := false.B
    freeList.io.doAllocate(w) := false.B

    freeList.io.allocateReq(w) := true.B

    // Allocate ready
    val offset = PopCount(needEnqueue.take(w))
    val canAccept = freeList.io.canAllocate(offset)
    val enqIndex = freeList.io.allocateSlot(offset)
    enq.ready := Mux(needEnqueue(w), canAccept, true.B)

    enqIndexVec(w) := enqIndex
    when (needEnqueue(w) && enq.ready) {
      acceptedVec(w) := true.B

      freeList.io.doAllocate(w) := true.B

      // Allocate a new entry
      allocated(enqIndex) := true.B

      // Write paddr
      paddrModule.io.wen(w) := true.B
      paddrModule.io.waddr(w) := enqIndex
      paddrModule.io.wdata(w) := genPartialPAddr(enq.bits.paddr)

      // Write mask
      maskModule.io.wen(w) := true.B
      maskModule.io.waddr(w) := enqIndex
      maskModule.io.wdata(w) := enq.bits.mask

      // Fill info
      uop(enqIndex) := enq.bits.uop
      datavalid(enqIndex) := enq.bits.data_valid
    }
    val debug_robIdx = enq.bits.uop.robIdx.asUInt
    XSError(needEnqueue(w) && enq.ready && allocated(enqIndex), p"LoadQueueRAW: You can not write a valid entry! check: ldu $w, robIdx $debug_robIdx")
  }

  for ((query, w) <- io.query.map(_.resp).zipWithIndex) {
    query.valid := RegNext(io.query(w).req.valid)
    query.bits.rep_frm_fetch := RegNext(false.B)
  }

  // LoadQueueRAW deallocate
  val freeMaskVec = Wire(Vec(LoadQueueRAWSize, Bool()))

  // init
  freeMaskVec.foreach(e => e := false.B)

  // Once all stores older than the current load have their addresses ready,
  // no store can still trigger a RAW violation against it, so the entry
  // can be released.
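  // Release conditions, summarized from the logic below:
  //   1. deqNotBlock  : every older store address is ready, so no RAW check is pending;
  //   2. needCancel   : the load is flushed by a redirect;
  //   3. vecLdCancel  : a vector-load feedback flush matches this entry's robIdx/uopIdx.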
  val vecLdCanceltmp = Wire(Vec(LoadQueueRAWSize, Vec(VecLoadPipelineWidth, Bool())))
  val vecLdCancel = Wire(Vec(LoadQueueRAWSize, Bool()))
  for (i <- 0 until LoadQueueRAWSize) {
    val deqNotBlock = Mux(!allAddrCheck, !isBefore(io.stAddrReadySqPtr, uop(i).sqIdx), true.B)
    val needCancel = uop(i).robIdx.needFlush(io.redirect)
    val fbk = io.vecFeedback
    for (j <- 0 until VecLoadPipelineWidth) {
      vecLdCanceltmp(i)(j) := allocated(i) && fbk(j).valid && fbk(j).bits.isFlush && uop(i).robIdx === fbk(j).bits.robidx && uop(i).uopIdx === fbk(j).bits.uopidx
    }
    vecLdCancel(i) := vecLdCanceltmp(i).reduce(_ || _)

    when (allocated(i) && (deqNotBlock || needCancel || vecLdCancel(i))) {
      allocated(i) := false.B
      freeMaskVec(i) := true.B
    }
  }

  // if the load needs replay, its query is revoked and the entry is deallocated
  val lastCanAccept = GatedValidRegNext(acceptedVec)
  val lastAllocIndex = GatedRegNext(enqIndexVec)

  for ((revoke, w) <- io.query.map(_.revoke).zipWithIndex) {
    val revokeValid = revoke && lastCanAccept(w)
    val revokeIndex = lastAllocIndex(w)

    when (allocated(revokeIndex) && revokeValid) {
      allocated(revokeIndex) := false.B
      freeMaskVec(revokeIndex) := true.B
    }
  }
  freeList.io.free := freeMaskVec.asUInt

  io.lqFull := freeList.io.empty // no free entries left: the RAW queue is full

  /**
   * Store-load memory violation detection
   * Scheme 1 (current scheme): flush the pipeline and re-fetch from the load instruction (like the old load queue).
   * Scheme 2                 : re-fetch instructions from the first instruction after the store instruction.
   *
   * When a store writes back, it searches the LoadQueue for younger load instructions
   * with the same physical address. They have loaded wrong data and need re-execution.
   *
   * Cycle 0: Store Writeback
   *   Generate the match vector for the store address with rangeMask(stPtr, enqPtr).
   * Cycle 1: Select the oldest load from each select group.
   * Cycle x: Redirect Fire
   *   Choose the oldest load among the LoadPipelineWidth oldest loads.
   *   Prepare the redirect request according to the detected violation.
   *   Fire the redirect request (if valid).
   */
  //              SelectGroup 0         SelectGroup 1         SelectGroup y
  // stage 0:      lq  lq  lq  ......    lq  lq  lq  .......    lq  lq  lq
  //               |   |   |             |   |   |              |   |   |
  // stage 1:      lq  lq  lq  ......    lq  lq  lq  .......    lq  lq  lq
  //                \  |  /    ......     \  |  /    .......     \  |  /
  // stage 2:          lq                    lq                     lq
  //                    \  |  /  .......  \  |  /  ........  \  |  /
  // stage 3:              lq                lq                  lq
  //                                         ...
  //                                         ...
  //                                          |
  // stage x:                                 lq
  //                                          |
  //                                    rollback req

  // select logic
  val SelectGroupSize = RollbackGroupSize
  val lgSelectGroupSize = log2Ceil(SelectGroupSize)
  val TotalSelectCycles = scala.math.ceil(log2Ceil(LoadQueueRAWSize).toFloat / lgSelectGroupSize).toInt + 1

  def selectPartialOldest[T <: XSBundleWithMicroOp](valid: Seq[Bool], bits: Seq[T]): (Seq[Bool], Seq[T]) = {
    assert(valid.length == bits.length)
    if (valid.length == 0 || valid.length == 1) {
      (valid, bits)
    } else if (valid.length == 2) {
      val res = Seq.fill(2)(Wire(ValidIO(chiselTypeOf(bits(0)))))
      for (i <- res.indices) {
        res(i).valid := valid(i)
        res(i).bits := bits(i)
      }
      val oldest = Mux(valid(0) && valid(1), Mux(isAfter(bits(0).uop.robIdx, bits(1).uop.robIdx), res(1), res(0)), Mux(valid(0) && !valid(1), res(0), res(1)))
      (Seq(oldest.valid), Seq(oldest.bits))
    } else {
      val left = selectPartialOldest(valid.take(valid.length / 2), bits.take(bits.length / 2))
      val right = selectPartialOldest(valid.takeRight(valid.length - (valid.length / 2)), bits.takeRight(bits.length - (bits.length / 2)))
      selectPartialOldest(left._1 ++ right._1, left._2 ++ right._2)
    }
  }

  def selectOldest[T <: XSBundleWithMicroOp](valid: Seq[Bool], bits: Seq[T]): (Seq[Bool], Seq[T]) = {
    assert(valid.length == bits.length)
    val numSelectGroups = scala.math.ceil(valid.length.toFloat / SelectGroupSize).toInt

    // group info
    val selectValidGroups = valid.grouped(SelectGroupSize).toList
    val selectBitsGroups = bits.grouped(SelectGroupSize).toList
    // select logic
    if (valid.length <= SelectGroupSize) {
      val (selValid, selBits) = selectPartialOldest(valid, bits)
      val selValidNext = GatedValidRegNext(selValid(0))
      val selBitsNext = RegEnable(selBits(0), selValid(0))
      (Seq(selValidNext && !selBitsNext.uop.robIdx.needFlush(RegNext(io.redirect))), Seq(selBitsNext))
    } else {
      val select = (0 until numSelectGroups).map(g => {
        val (selValid, selBits) = selectPartialOldest(selectValidGroups(g), selectBitsGroups(g))
        val selValidNext = RegNext(selValid(0))
        val selBitsNext = RegEnable(selBits(0), selValid(0))
        (selValidNext && !selBitsNext.uop.robIdx.needFlush(io.redirect) && !selBitsNext.uop.robIdx.needFlush(RegNext(io.redirect)), selBitsNext)
      })
      selectOldest(select.map(_._1), select.map(_._2))
    }
  }

  val storeIn = io.storeIn

  def detectRollback(i: Int) = {
    paddrModule.io.violationMdata(i) := genPartialPAddr(RegEnable(storeIn(i).bits.paddr, storeIn(i).valid))
    maskModule.io.violationMdata(i) := RegEnable(storeIn(i).bits.mask, storeIn(i).valid)

    val addrMaskMatch = paddrModule.io.violationMmask(i).asUInt & maskModule.io.violationMmask(i).asUInt
    val entryNeedCheck = GatedValidRegNext(VecInit((0 until LoadQueueRAWSize).map(j => {
      allocated(j) && storeIn(i).valid && isAfter(uop(j).robIdx, storeIn(i).bits.uop.robIdx) && datavalid(j) && !uop(j).robIdx.needFlush(io.redirect)
    })))
    val lqViolationSelVec = VecInit((0 until LoadQueueRAWSize).map(j => {
      addrMaskMatch(j) && entryNeedCheck(j)
    }))

    val lqViolationSelUopExts = uop.map(uop => {
      val wrapper = Wire(new XSBundleWithMicroOp)
      wrapper.uop := uop
      wrapper
    })

    // select logic
    val lqSelect: (Seq[Bool], Seq[XSBundleWithMicroOp]) = selectOldest(lqViolationSelVec, lqViolationSelUopExts)

    // select one inst
    val lqViolation = lqSelect._1(0)
    val lqViolationUop = lqSelect._2(0).uop

    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x robidx %d target %x\n",
      storeIn(i).bits.uop.pc, storeIn(i).bits.uop.robIdx.asUInt, lqViolationUop.robIdx.asUInt
    )

    (lqViolation, lqViolationUop)
  }
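  // For reference, the matching condition realized above: entry j conflicts with
  // the store in pipe i iff its partial paddr and mask CAM-match the store's
  // (addrMaskMatch) and, registered one cycle earlier, the entry was allocated,
  // held valid data, was younger than the store in ROB order, and was not being
  // flushed (entryNeedCheck). The select tree then returns the oldest such load.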
  // select rollback (part 1) and generate rollback request
  // rollback check
  //   Lq rollback seq check is done in s3 (next stage), as getting the rollbackLq MicroOp is slow
  val rollbackLqWb = Wire(Vec(StorePipelineWidth, Valid(new DynInst)))
  val stFtqIdx = Wire(Vec(StorePipelineWidth, new FtqPtr))
  val stFtqOffset = Wire(Vec(StorePipelineWidth, UInt(log2Up(PredictWidth).W)))
  for (w <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(w)
    rollbackLqWb(w).valid := detectedRollback._1 && DelayN(storeIn(w).valid && !storeIn(w).bits.miss, TotalSelectCycles)
    rollbackLqWb(w).bits := detectedRollback._2
    stFtqIdx(w) := DelayNWithValid(storeIn(w).bits.uop.ftqPtr, storeIn(w).valid, TotalSelectCycles)._2
    stFtqOffset(w) := DelayNWithValid(storeIn(w).bits.uop.ftqOffset, storeIn(w).valid, TotalSelectCycles)._2
  }

  // select rollback (part 2), generate rollback request, then fire rollback request
  // Note that we use robIdx - 1.U to flush the load instruction itself.
  // Thus, if last cycle's robIdx equals this cycle's robIdx, it still triggers the redirect.

  // select uop in parallel
  val allRedirect = (0 until StorePipelineWidth).map(i => {
    val redirect = Wire(Valid(new Redirect))
    redirect.valid := rollbackLqWb(i).valid
    redirect.bits := DontCare
    redirect.bits.isRVC := rollbackLqWb(i).bits.preDecodeInfo.isRVC
    redirect.bits.robIdx := rollbackLqWb(i).bits.robIdx
    redirect.bits.ftqIdx := rollbackLqWb(i).bits.ftqPtr
    redirect.bits.ftqOffset := rollbackLqWb(i).bits.ftqOffset
    redirect.bits.stFtqIdx := stFtqIdx(i)
    redirect.bits.stFtqOffset := stFtqOffset(i)
    redirect.bits.level := RedirectLevel.flush
    redirect.bits.cfiUpdate.target := rollbackLqWb(i).bits.pc
    redirect.bits.debug_runahead_checkpoint_id := rollbackLqWb(i).bits.debugInfo.runahead_checkpoint_id
    redirect
  })
  io.rollback := allRedirect

  // perf cnt
  val canEnqCount = PopCount(io.query.map(_.req.fire))
  val validCount = freeList.io.validCount
  val allowEnqueue = validCount <= (LoadQueueRAWSize - LoadPipelineWidth).U
  val rollbackValid = io.rollback.map(_.valid).reduce(_ || _).asUInt

  QueuePerf(LoadQueueRAWSize, validCount, !allowEnqueue)
  XSPerfAccumulate("enqs", canEnqCount)
  XSPerfAccumulate("stld_rollback", rollbackValid)
  val perfEvents: Seq[(String, UInt)] = Seq(
    ("enq          ", canEnqCount),
    ("stld_rollback", rollbackValid),
  )
  generatePerfEvent()
  // end
}
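
// A minimal, standalone sketch (not part of the synthesized design) of the
// TotalSelectCycles arithmetic used above: each tree stage reduces one
// SelectGroup to a single candidate, so covering LoadQueueRAWSize entries takes
// ceil(log2Ceil(size) / log2Ceil(groupSize)) stages, plus one extra pipeline
// cycle for the registered CAM-match stage in detectRollback. The object name
// and example parameters below are illustrative assumptions, not values taken
// from the XiangShan configuration.
object SelectLatencySketch extends App {
  // integer ceil(log2(x)), kept local so the sketch does not depend on chisel3.util
  private def log2CeilInt(x: Int): Int = math.ceil(math.log(x) / math.log(2)).toInt

  def totalSelectCycles(numEntries: Int, selectGroupSize: Int): Int =
    math.ceil(log2CeilInt(numEntries).toFloat / log2CeilInt(selectGroupSize)).toInt + 1

  // e.g. 32 entries with 8-wide select groups: ceil(5 / 3) + 1 = 3 cycles
  println(totalSelectCycles(32, 8))
}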