package xiangshan.mem

import chisel3._
import chisel3.util._
import freechips.rocketchip.tile.HasFPUParameters
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheLineIO, DCacheWordIO, MemoryOpConstants, TlbRequestIO}
import xiangshan.backend.LSUOpType
import xiangshan.mem._
import xiangshan.backend.roq.RoqPtr
import xiangshan.backend.fu.HasExceptionNO


class LqPtr extends CircularQueuePtr(LqPtr.LoadQueueSize) { }

object LqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}

trait HasLoadHelper { this: XSModule =>
  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    val fpWen = uop.ctrl.fpWen
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lb  -> SignExt(rdata(7, 0),  XLEN),
      LSUOpType.lh  -> SignExt(rdata(15, 0), XLEN),
      LSUOpType.lw  -> Mux(fpWen, rdata, SignExt(rdata(31, 0), XLEN)),
      LSUOpType.ld  -> Mux(fpWen, rdata, SignExt(rdata(63, 0), XLEN)),
      LSUOpType.lbu -> ZeroExt(rdata(7, 0),  XLEN),
      LSUOpType.lhu -> ZeroExt(rdata(15, 0), XLEN),
      LSUOpType.lwu -> ZeroExt(rdata(31, 0), XLEN)
    ))
  }

  def fpRdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lw -> recode(rdata(31, 0), S),
      LSUOpType.ld -> recode(rdata(63, 0), D)
    ))
  }
}

class LqEnqIO extends XSBundle {
  val canAccept = Output(Bool())
  val sqCanAccept = Input(Bool())
  val needAlloc = Vec(RenameWidth, Input(Bool()))
  val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(RenameWidth, Output(new LqPtr))
}

// Load Queue
class LoadQueue extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasExceptionNO
{
  val io = IO(new Bundle() {
    val enq = new LqEnqIO
    val brqRedirect = Input(Valid(new Redirect))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback int load
    val load_s1 = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val commits = Flipped(new RoqCommitIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = Flipped(ValidIO(new Refill))
    val uncache = new DCacheWordIO
    val roqDeqPtr = Input(new RoqPtr)
    val exceptionAddr = new ExceptionAddrIO
  })

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
  val dataModule = Module(new LoadQueueData(LoadQueueSize, wbNumRead = LoadPipelineWidth, wbNumWrite = LoadPipelineWidth))
  dataModule.io := DontCare
  val vaddrModule = Module(new AsyncDataModuleTemplate(UInt(VAddrBits.W), LoadQueueSize, numRead = 1, numWrite = LoadPipelineWidth))
  vaddrModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been written back to CDB
  val commited = Reg(Vec(LoadQueueSize, Bool())) // inst has been committed by roq
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  // val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the end of roq

  val debug_mmio = Reg(Vec(LoadQueueSize, Bool())) // mmio: inst is an mmio inst
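
  // Entry lifecycle, as tracked by the flag registers above (informal sketch):
  //   dispatch:            allocated set, all other flags cleared
  //   load unit wb (hit):  datavalid and writebacked set together
  //   load unit wb (miss): miss set; a matching refill clears it and sets datavalid
  //   load unit wb (mmio): pending set; serviced through io.uncache at roq head
  //   roq commit:          allocated cleared and deqPtrExt stepped past the entry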

  val enqPtrExt = RegInit(VecInit((0 until RenameWidth).map(_.U.asTypeOf(new LqPtr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val validCounter = RegInit(0.U(log2Ceil(LoadQueueSize + 1).W))
  val allowEnqueue = RegInit(true.B)

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt.value
  val sameFlag = enqPtrExt(0).flag === deqPtrExt.flag
  val isEmpty = enqPtr === deqPtr && sameFlag
  val isFull = enqPtr === deqPtr && !sameFlag
  val allowIn = !isFull

  val loadCommit = (0 until CommitWidth).map(i => io.commits.valid(i) && !io.commits.isWalk && io.commits.info(i).commitType === CommitType.LOAD)
  val mcommitIdx = (0 until CommitWidth).map(i => io.commits.info(i).lqIdx.value)

  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)

  /**
    * Enqueue at dispatch
    *
    * Currently, LoadQueue only allows enqueue when #emptyEntries > RenameWidth (EnqWidth)
    */
  io.enq.canAccept := allowEnqueue

  for (i <- 0 until RenameWidth) {
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = lqIdx.value
    when (io.enq.req(i).valid && io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      commited(index) := false.B
      miss(index) := false.B
      // listening(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := lqIdx
  }
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")
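
  // Offset example (informal): with RenameWidth = 4 and needAlloc = (1, 0, 1, 1),
  // PopCount yields offsets 0, 1, 1, 2, so the three allocating uops receive
  // enqPtrExt(0), enqPtrExt(1) and enqPtrExt(2); the resp of the non-allocating
  // uop is simply ignored.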

  /**
    * Writeback load from load units
    *
    * Most load instructions writeback to regfile at the same time.
    * However,
    * (1) For an mmio instruction with exceptions, it writes back to ROB immediately.
    * (2) For an mmio instruction without exceptions, it does not write back.
    *     The mmio instruction will be sent to lower level when it reaches ROB's head.
    *     After uncache response, it will write back through arbiter with loadUnit.
    * (3) For cache misses, it is marked as miss and sent to dcache later.
    *     After cache refills, it will write back through arbiter with loadUnit.
    */
  for (i <- 0 until LoadPipelineWidth) {
    dataModule.io.wb.wen(i) := false.B
    vaddrModule.io.wen(i) := false.B
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to CDB lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }
      val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
      datavalid(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio

      val loadWbData = Wire(new LQDataEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.data // fwd data
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      loadWbData.exception := selectLoad(io.loadIn(i).bits.uop.cf.exceptionVec)
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb.wen(i) := true.B

      vaddrModule.io.waddr(i) := loadWbIndex
      vaddrModule.io.wdata(i) := io.loadIn(i).bits.vaddr
      vaddrModule.io.wen(i) := true.B

      debug_mmio(loadWbIndex) := io.loadIn(i).bits.mmio

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      val hasException = selectLoad(io.loadIn(i).bits.uop.cf.exceptionVec, false).asUInt.orR
      miss(loadWbIndex) := dcacheMissed && !hasException
      pending(loadWbIndex) := io.loadIn(i).bits.mmio && !hasException
    }
  }

  /**
    * Cache miss request
    *
    * (1) writeback: miss
    * (2) send to dcache: listening
    * (3) dcache response: datavalid
    * (4) writeback to ROB: writeback
    */
  // val inflightReqs = RegInit(VecInit(Seq.fill(cfg.nLoadMissEntries)(0.U.asTypeOf(new InflightBlockInfo))))
  // val inflightReqFull = inflightReqs.map(req => req.valid).reduce(_&&_)
  // val reqBlockIndex = PriorityEncoder(~VecInit(inflightReqs.map(req => req.valid)).asUInt)

  // val missRefillSelVec = VecInit(
  //   (0 until LoadQueueSize).map{ i =>
  //     val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(dataModule.io.rdata(i).paddr)).reduce(_||_)
  //     allocated(i) && miss(i) && !inflight
  //   })

  // val missRefillSel = getFirstOne(missRefillSelVec, deqMask)
  // val missRefillBlockAddr = get_block_addr(dataModule.io.rdata(missRefillSel).paddr)
  // io.dcache.req.valid := missRefillSelVec.asUInt.orR
  // io.dcache.req.bits.cmd := MemoryOpConstants.M_XRD
  // io.dcache.req.bits.addr := missRefillBlockAddr
  // io.dcache.req.bits.data := DontCare
  // io.dcache.req.bits.mask := DontCare

  // io.dcache.req.bits.meta.id := DontCare
  // io.dcache.req.bits.meta.vaddr := DontCare // dataModule.io.rdata(missRefillSel).vaddr
  // io.dcache.req.bits.meta.paddr := missRefillBlockAddr
  // io.dcache.req.bits.meta.uop := uop(missRefillSel)
  // io.dcache.req.bits.meta.mmio := false.B // dataModule.io.rdata(missRefillSel).mmio
  // io.dcache.req.bits.meta.tlb_miss := false.B
  // io.dcache.req.bits.meta.mask := DontCare
  // io.dcache.req.bits.meta.replay := false.B

  // assert(!(dataModule.io.rdata(missRefillSel).mmio && io.dcache.req.valid))

  // when(io.dcache.req.fire()) {
  //   miss(missRefillSel) := false.B
  //   listening(missRefillSel) := true.B

  //   // mark this block as inflight
  //   inflightReqs(reqBlockIndex).valid := true.B
  //   inflightReqs(reqBlockIndex).block_addr := missRefillBlockAddr
  //   assert(!inflightReqs(reqBlockIndex).valid)
  // }

  // when(io.dcache.resp.fire()) {
  //   val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)).reduce(_||_)
  //   assert(inflight)
  //   for (i <- 0 until cfg.nLoadMissEntries) {
  //     when (inflightReqs(i).valid && inflightReqs(i).block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)) {
  //       inflightReqs(i).valid := false.B
  //     }
  //   }
  // }


  // when(io.dcache.req.fire()){
  //   XSDebug("miss req: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x vaddr:0x%x\n",
  //     io.dcache.req.bits.meta.uop.cf.pc, io.dcache.req.bits.meta.uop.roqIdx.asUInt, io.dcache.req.bits.meta.uop.lqIdx.asUInt,
  //     io.dcache.req.bits.addr, io.dcache.req.bits.meta.vaddr
  //   )
  // }

  when(io.dcache.valid) {
    XSDebug("miss resp: paddr:0x%x data %x\n", io.dcache.bits.addr, io.dcache.bits.data)
  }

  // Refill 64 bit in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.valid := io.dcache.valid
  dataModule.io.refill.paddr := io.dcache.bits.addr
  dataModule.io.refill.data := io.dcache.bits.data

  (0 until LoadQueueSize).map(i => {
    dataModule.io.refill.refillMask(i) := allocated(i) && miss(i)
    when(dataModule.io.refill.valid && dataModule.io.refill.refillMask(i) && dataModule.io.refill.matchMask(i)) {
      datavalid(i) := true.B
      miss(i) := false.B
    }
  })
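
  // Refill wakeup (sketch): refillMask marks entries still waiting on a miss;
  // matchMask is assumed to be computed inside LoadQueueData by comparing each
  // entry's recorded paddr against the refill block address, so an entry turns
  // datavalid only when the incoming line actually covers its address.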

  // Writeback up to 2 missed load insts to CDB
  //
  // Pick 2 missed loads (data refilled) and write them back to the CDB.
  // The 2 refilled loads are selected from even and odd entries, separately.

  // Stage 0
  // Generate writeback indexes
  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && !writebacked(i) && datavalid(i)
  })).asUInt() // use UInt instead of Vec to reduce verilog lines
  val loadEvenSelVec = VecInit((0 until LoadQueueSize / 2).map(i => { loadWbSelVec(2 * i) }))
  val loadOddSelVec = VecInit((0 until LoadQueueSize / 2).map(i => { loadWbSelVec(2 * i + 1) }))
  val evenDeqMask = VecInit((0 until LoadQueueSize / 2).map(i => { deqMask(2 * i) })).asUInt
  val oddDeqMask = VecInit((0 until LoadQueueSize / 2).map(i => { deqMask(2 * i + 1) })).asUInt

  val loadWbSelGen = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelVGen = Wire(Vec(LoadPipelineWidth, Bool()))
  loadWbSelGen(0) := Cat(getFirstOne(loadEvenSelVec, evenDeqMask), 0.U(1.W))
  loadWbSelVGen(0) := loadEvenSelVec.asUInt.orR
  loadWbSelGen(1) := Cat(getFirstOne(loadOddSelVec, oddDeqMask), 1.U(1.W))
  loadWbSelVGen(1) := loadOddSelVec.asUInt.orR

  val loadWbSel = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelV = RegInit(VecInit(List.fill(LoadPipelineWidth)(false.B)))
  (0 until LoadPipelineWidth).map(i => {
    val canGo = io.ldout(i).fire() || !loadWbSelV(i)
    val valid = loadWbSelVGen(i)
    // store the selected index in a pipeline reg
    loadWbSel(i) := RegEnable(loadWbSelGen(i), valid && canGo)
    // mark them as writebacked, so they will not be selected in the next cycle
    when(valid && canGo) {
      writebacked(loadWbSelGen(i)) := true.B
    }
    // update loadWbSelV
    when(io.ldout(i).fire()) {
      loadWbSelV(i) := false.B
    }
    when(valid && canGo) {
      loadWbSelV(i) := true.B
    }
  })
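
  // Worked example (sketch, LoadQueueSize = 8, deqPtr = 3): if entries 2, 4 and 6
  // hold refilled data, loadEvenSelVec = b1110 over even entries (6, 4, 2, 0) and
  // evenDeqMask = b0011 (even entries below deqPtr), so getFirstOne skips the
  // wrapped entry 2 and port 0 selects entry 4, the oldest unwrapped candidate.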

  // Stage 1
  // Use indexes generated in cycle 0 to read data
  // writeback data to CDB
  (0 until LoadPipelineWidth).map(i => {
    // data select
    dataModule.io.wb.raddr(i) := loadWbSel(i)
    val rdata = dataModule.io.wb.rdata(i).data
    val seluop = uop(loadWbSel(i))
    val func = seluop.ctrl.fuOpType
    val raddr = dataModule.io.wb.rdata(i).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
    val rdataPartialLoad = rdataHelper(seluop, rdataSel)

    // writeback missed int/fp load
    //
    // Int load writeback will finish (if not blocked) in one cycle
    io.ldout(i).bits.uop := seluop
    io.ldout(i).bits.uop.cf.exceptionVec := selectLoad(dataModule.io.wb.rdata(i).exception)
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.brUpdate := DontCare
    io.ldout(i).bits.debug.isMMIO := debug_mmio(loadWbSel(i))
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelV(i)

    when(io.ldout(i).fire()) {
      XSInfo("int load miss write to CDB roqidx %d lqidx %d pc 0x%x paddr %x data %x mmio %x\n",
        io.ldout(i).bits.uop.roqIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        dataModule.io.debug(loadWbSel(i)).paddr,
        dataModule.io.debug(loadWbSel(i)).data,
        debug_mmio(loadWbSel(i))
      )
    }
  })
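
  // Alignment example: for an lh whose paddr ends in b010, rdataSel picks
  // rdata(63, 16), i.e. the 64-bit word shifted right by two bytes, and
  // rdataHelper then sign-extends rdataSel(15, 0) to XLEN.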

  /**
    * Load commits
    *
    * When a load commits, mark it as !allocated and move deqPtrExt forward.
    */
  (0 until CommitWidth).map(i => {
    when(loadCommit(i)) {
      allocated(mcommitIdx(i)) := false.B
      XSDebug("load commit %d: idx %d %x\n", i.U, mcommitIdx(i), uop(mcommitIdx(i)).cf.pc)
    }
  })

  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).roqIdx, uop(1).roqIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).roqIdx, uop(j).roqIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }
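
  // getFirstOne example: mask = b0101 with startMask = b0011 gives
  // highBits = b0100, so index 2 (the first candidate at or beyond the start
  // pointer) wins; if no candidate survives startMask, it falls back to the
  // lowest set bit of mask.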

  /**
    * Memory violation detection
    *
    * When a store writes back, it searches the LoadQueue for younger load
    * instructions with the same physical address. They loaded wrong data and
    * need re-execution.
    *
    * Cycle 0: Store Writeback
    *   Generate a match vector for the store address with rangeMask(stPtr, enqPtr).
    *   Besides, load instructions in LoadUnit_S1 and S2 are also checked.
    * Cycle 1: Redirect Generation
    *   There are three possible types of violations. Choose the oldest load.
    *   Set io.redirect according to the detected violation.
    */
  io.load_s1 := DontCare
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)

    // check if a load already in lq needs to be rolled back
    dataModule.io.violation(i).paddr := io.storeIn(i).bits.paddr
    dataModule.io.violation(i).mask := io.storeIn(i).bits.mask
    val addrMaskMatch = RegNext(dataModule.io.violation(i).violationMask)
    val entryNeedCheck = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      allocated(j) && toEnqPtrMask(j) && (datavalid(j) || miss(j))
    })))
    val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
      addrMaskMatch(j) && entryNeedCheck(j)
    }))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when l/s writeback to roq together, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
        isAfter(io.loadIn(j).bits.uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR()
    val wbViolationUop = getOldestInTwo(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits.uop))))
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for load in l1
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.load_s1(j).valid && // L1 valid
        isAfter(io.load_s1(j).uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR()
    val l1ViolationUop = getOldestInTwo(l1ViolationVec, RegNext(VecInit(io.load_s1.map(_.uop))))
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    val rollbackValidVec = Seq(lqViolation, wbViolation, l1Violation)
    val rollbackUopVec = Seq(lqViolationUop, wbViolationUop, l1ViolationUop)

    val mask = getAfterMask(rollbackValidVec, rollbackUopVec)
    val oneAfterZero = mask(1)(0)
    val rollbackUop = Mux(oneAfterZero && mask(2)(0),
      rollbackUopVec(0),
      Mux(!oneAfterZero && mask(2)(1), rollbackUopVec(1), rollbackUopVec(2)))

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, l1ViolationUop.roqIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, lqViolationUop.roqIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, wbViolationUop.roqIdx.asUInt
    )

    (RegNext(io.storeIn(i).valid) && Cat(rollbackValidVec).orR, rollbackUop)
  }

  // rollback check
  val rollback = Wire(Vec(StorePipelineWidth, Valid(new MicroOp)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollback(i).valid := detectedRollback._1
    rollback(i).bits := detectedRollback._2
  }

  def rollbackSel(a: Valid[MicroOp], b: Valid[MicroOp]): ValidIO[MicroOp] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.roqIdx, b.bits.roqIdx), b, a), // a, b both valid: select the older one
        a // select a
      ),
      b // select b
    )
  }
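
  // Arbitration note: ParallelOperation folds the per-store-pipe candidates
  // pairwise through rollbackSel; e.g. two valid candidates with roqIdx 7 and
  // 12 (same flag) resolve to 7, so the redirect always targets the oldest
  // violating load.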

  val rollbackSelected = ParallelOperation(rollback, rollbackSel)
  val lastCycleRedirect = RegNext(io.brqRedirect)

  // Note that we use roqIdx - 1.U to flush the load instruction itself.
  // Thus, here if last cycle's roqIdx equals this cycle's roqIdx, it still triggers the redirect.
  io.rollback.valid := rollbackSelected.valid &&
    (!lastCycleRedirect.valid || !isAfter(rollbackSelected.bits.roqIdx, lastCycleRedirect.bits.roqIdx)) &&
    !(lastCycleRedirect.valid && lastCycleRedirect.bits.isUnconditional())

  io.rollback.bits.roqIdx := rollbackSelected.bits.roqIdx
  io.rollback.bits.level := RedirectLevel.flush
  io.rollback.bits.interrupt := DontCare
  io.rollback.bits.pc := DontCare
  io.rollback.bits.target := rollbackSelected.bits.cf.pc
  io.rollback.bits.brTag := rollbackSelected.bits.brTag

  when(io.rollback.valid) {
    XSDebug("Mem rollback: pc %x roqidx %d\n", io.rollback.bits.pc, io.rollback.bits.roqIdx.asUInt)
  }

  /**
    * Memory mapped IO / other uncached operations
    */
  io.uncache.req.valid := pending(deqPtr) && allocated(deqPtr) &&
    io.commits.info(0).commitType === CommitType.LOAD &&
    io.roqDeqPtr === uop(deqPtr).roqIdx &&
    !io.commits.isWalk

  dataModule.io.uncache.raddr := deqPtr

  io.uncache.req.bits.cmd := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.uncache.rdata.paddr
  io.uncache.req.bits.data := dataModule.io.uncache.rdata.data
  io.uncache.req.bits.mask := dataModule.io.uncache.rdata.mask

  io.uncache.req.bits.meta.id := DontCare
  io.uncache.req.bits.meta.vaddr := DontCare
  io.uncache.req.bits.meta.paddr := dataModule.io.uncache.rdata.paddr
  io.uncache.req.bits.meta.uop := uop(deqPtr)
  io.uncache.req.bits.meta.mmio := true.B
  io.uncache.req.bits.meta.tlb_miss := false.B
  io.uncache.req.bits.meta.mask := dataModule.io.uncache.rdata.mask
  io.uncache.req.bits.meta.replay := false.B

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B

    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()) {
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN - 1, 0))
    dataModule.io.uncache.wen := true.B

    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }

  // Read vaddr for mem exception
  vaddrModule.io.raddr(0) := io.exceptionAddr.lsIdx.lqIdx.value
  io.exceptionAddr.vaddr := vaddrModule.io.rdata(0)

  // misprediction recovery / exception redirect
  // invalidate lq entries using roqIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect) && allocated(i) && !commited(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }
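
  // Recovery sketch: enqueue is blocked while a redirect is in flight
  // (enqNumber is forced to 0 below), and in the following cycle enqPtrExt is
  // wound back by lastCycleCancelCount, the number of entries the flush
  // reclaimed, instead of advancing.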

  /**
    * update pointers
    */
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  // when io.brqRedirect.valid, we don't allow enqueue even though it may fire.
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid, PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect.valid) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }

  val commitCount = PopCount(loadCommit)
  deqPtrExt := deqPtrExt + commitCount

  val lastLastCycleRedirect = RegNext(lastCycleRedirect.valid)
  val trueValidCounter = distanceBetween(enqPtrExt(0), deqPtrExt)
  validCounter := Mux(lastLastCycleRedirect,
    trueValidCounter,
    validCounter + enqNumber - commitCount
  )

  allowEnqueue := Mux(io.brqRedirect.valid,
    false.B,
    Mux(lastLastCycleRedirect,
      trueValidCounter <= (LoadQueueSize - RenameWidth).U,
      validCounter + enqNumber <= (LoadQueueSize - RenameWidth).U
    )
  )

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.debug(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && commited(i), "c")
    PrintFlag(allocated(i) && miss(i), "m")
    // PrintFlag(allocated(i) && listening(i), "l")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == LoadQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}