package xiangshan.mem

import chisel3._
import chisel3.util._
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheWordIO, DCacheLineIO, TlbRequestIO, MemoryOpConstants}
import xiangshan.backend.LSUOpType
import xiangshan.mem._
import xiangshan.backend.roq.RoqPtr
import xiangshan.backend.fu.fpu.boxF32ToF64


class LqPtr extends CircularQueuePtr(LqPtr.LoadQueueSize) { }

object LqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}

class LqEnqIO extends XSBundle {
  val canAccept = Output(Bool())
  val sqCanAccept = Input(Bool())
  val needAlloc = Vec(RenameWidth, Input(Bool()))
  val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(RenameWidth, Output(new LqPtr))
}

// Load Queue
class LoadQueue extends XSModule with HasDCacheParameters with HasCircularQueuePtrHelper {
  val io = IO(new Bundle() {
    val enq = new LqEnqIO
    val brqRedirect = Input(Valid(new Redirect))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // FIXME: Valid() only
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback load
    val load_s1 = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val commits = Flipped(new RoqCommitIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = new DCacheLineIO
    val uncache = new DCacheWordIO
    val roqDeqPtr = Input(new RoqPtr)
    val exceptionAddr = new ExceptionAddrIO
  })

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
  val dataModule = Module(new LSQueueData(LoadQueueSize, LoadPipelineWidth))
  dataModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been written back to CDB
  val commited = Reg(Vec(LoadQueueSize, Bool())) // inst has been committed by roq
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the end of roq

  val enqPtrExt = RegInit(VecInit((0 until RenameWidth).map(_.U.asTypeOf(new LqPtr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val validCounter = RegInit(0.U(log2Ceil(LoadQueueSize + 1).W))
  val allowEnqueue = RegInit(true.B)

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt.value
  val sameFlag = enqPtrExt(0).flag === deqPtrExt.flag
  val isEmpty = enqPtr === deqPtr && sameFlag
  val isFull = enqPtr === deqPtr && !sameFlag
  val allowIn = !isFull

  val loadCommit = (0 until CommitWidth).map(i => io.commits.valid(i) && !io.commits.isWalk && io.commits.info(i).commitType === CommitType.LOAD)
  val mcommitIdx = (0 until CommitWidth).map(i => io.commits.info(i).lqIdx.value)

  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)

  /**
    * Enqueue at dispatch
    *
    * Currently, LoadQueue only allows enqueue when #emptyEntries > RenameWidth (i.e. EnqWidth)
    */
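  // For illustration: with RenameWidth = 4 and needAlloc = (1, 0, 1, 1),
  // PopCount(needAlloc.take(i)) yields offsets (0, 1, 1, 2), so req(0), req(2)
  // and req(3) claim enqPtrExt(0), enqPtrExt(1) and enqPtrExt(2) respectively;
  // a request with needAlloc deasserted does not advance the offset.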
  io.enq.canAccept := allowEnqueue

  for (i <- 0 until RenameWidth) {
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = lqIdx.value
    when (io.enq.req(i).valid && io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      commited(index) := false.B
      miss(index) := false.B
      listening(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := lqIdx
  }
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")

  /**
    * Writeback load from load units
    *
    * Most load instructions write back to the register file at the same time.
    * However,
    *   (1) For an mmio instruction with exceptions, it writes back to ROB immediately.
    *   (2) For an mmio instruction without exceptions, it does not write back.
    *       The mmio instruction will be sent to lower level when it reaches ROB's head.
    *       After uncache response, it will write back through arbiter with loadUnit.
    *   (3) For cache misses, it is marked as miss and sent to dcache later.
    *       After cache refills, it will write back through arbiter with loadUnit.
    */
  for (i <- 0 until LoadPipelineWidth) {
    dataModule.io.wb(i).wen := false.B
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }
      val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
      datavalid(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio

      val loadWbData = Wire(new LsqEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.vaddr := io.loadIn(i).bits.vaddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.data // for mmio / misc / debug
      loadWbData.mmio := io.loadIn(i).bits.mmio
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      loadWbData.fwdData := io.loadIn(i).bits.forwardData
      loadWbData.exception := io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb(i).wen := true.B

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed && !io.loadIn(i).bits.uop.cf.exceptionVec.asUInt.orR
      listening(loadWbIndex) := dcacheMissed
      pending(loadWbIndex) := io.loadIn(i).bits.mmio && !io.loadIn(i).bits.uop.cf.exceptionVec.asUInt.orR
    }
  }
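  // A sketch of the life cycle of a missed, non-mmio load without exceptions,
  // tracing the flags set above and in the miss-handling logic below:
  //   loadIn fire        : miss := 1, listening := 1
  //   dcache.req fire    : miss := 0 (listening stays asserted)
  //   dcache.resp refill : listening := 0, datavalid := 1
  //   ldout fire         : writebacked := 1
  //   roq commit         : allocated := 0, deqPtrExt moves forward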
  /**
    * Cache miss request
    *
    * (1) writeback: miss
    * (2) send to dcache: listening
    * (3) dcache response: datavalid
    * (4) writeback to ROB: writeback
    */
  val inflightReqs = RegInit(VecInit(Seq.fill(cfg.nLoadMissEntries)(0.U.asTypeOf(new InflightBlockInfo))))
  val inflightReqFull = inflightReqs.map(req => req.valid).reduce(_&&_)
  val reqBlockIndex = PriorityEncoder(~VecInit(inflightReqs.map(req => req.valid)).asUInt)

  val missRefillSelVec = VecInit(
    (0 until LoadQueueSize).map{ i =>
      val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(dataModule.io.rdata(i).paddr)).reduce(_||_)
      allocated(i) && miss(i) && !inflight
    })

  val missRefillSel = getFirstOne(missRefillSelVec, deqMask)
  val missRefillBlockAddr = get_block_addr(dataModule.io.rdata(missRefillSel).paddr)
  io.dcache.req.valid := missRefillSelVec.asUInt.orR
  io.dcache.req.bits.cmd := MemoryOpConstants.M_XRD
  io.dcache.req.bits.addr := missRefillBlockAddr
  io.dcache.req.bits.data := DontCare
  io.dcache.req.bits.mask := DontCare

  io.dcache.req.bits.meta.id := DontCare
  io.dcache.req.bits.meta.vaddr := DontCare // dataModule.io.rdata(missRefillSel).vaddr
  io.dcache.req.bits.meta.paddr := missRefillBlockAddr
  io.dcache.req.bits.meta.uop := uop(missRefillSel)
  io.dcache.req.bits.meta.mmio := false.B // dataModule.io.rdata(missRefillSel).mmio
  io.dcache.req.bits.meta.tlb_miss := false.B
  io.dcache.req.bits.meta.mask := DontCare
  io.dcache.req.bits.meta.replay := false.B

  io.dcache.resp.ready := true.B

  assert(!(dataModule.io.rdata(missRefillSel).mmio && io.dcache.req.valid))

  when(io.dcache.req.fire()) {
    miss(missRefillSel) := false.B
    listening(missRefillSel) := true.B

    // mark this block as inflight
    inflightReqs(reqBlockIndex).valid := true.B
    inflightReqs(reqBlockIndex).block_addr := missRefillBlockAddr
    assert(!inflightReqs(reqBlockIndex).valid)
  }

  when(io.dcache.resp.fire()) {
    val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)).reduce(_||_)
    assert(inflight)
    for (i <- 0 until cfg.nLoadMissEntries) {
      when (inflightReqs(i).valid && inflightReqs(i).block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)) {
        inflightReqs(i).valid := false.B
      }
    }
  }

  when(io.dcache.req.fire()){
    XSDebug("miss req: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x vaddr:0x%x\n",
      io.dcache.req.bits.meta.uop.cf.pc, io.dcache.req.bits.meta.uop.roqIdx.asUInt, io.dcache.req.bits.meta.uop.lqIdx.asUInt,
      io.dcache.req.bits.addr, io.dcache.req.bits.meta.vaddr
    )
  }

  when(io.dcache.resp.fire()){
    XSDebug("miss resp: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x data %x\n",
      io.dcache.resp.bits.meta.uop.cf.pc, io.dcache.resp.bits.meta.uop.roqIdx.asUInt, io.dcache.resp.bits.meta.uop.lqIdx.asUInt,
      io.dcache.resp.bits.meta.paddr, io.dcache.resp.bits.data
    )
  }

  // Refill 64 bit in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.dcache := io.dcache.resp.bits

  (0 until LoadQueueSize).map(i => {
    val blockMatch = get_block_addr(dataModule.io.rdata(i).paddr) === io.dcache.resp.bits.meta.paddr
    dataModule.io.refill.wen(i) := false.B
    when(allocated(i) && listening(i) && blockMatch && io.dcache.resp.fire()) {
      dataModule.io.refill.wen(i) := true.B
      datavalid(i) := true.B
      listening(i) := false.B
    }
  })
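  // Note on the refill loop above: the refill is effectively a broadcast. Every
  // allocated, listening entry compares its own block address against the
  // response, so one refill can wake up several loads to the same cache block
  // even though only one of them issued the miss request (the inflightReqs
  // filter in missRefillSelVec prevents duplicate requests for that block).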
  // writeback up to 2 missed load insts to CDB
  // pick the 2 lowest-indexed missed loads whose data has been refilled and write them back to the CDB
  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && datavalid(i) && !writebacked(i)
  })).asUInt() // use UInt instead of Vec to reduce verilog lines
  val loadWbSel = Wire(Vec(StorePipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelV = Wire(Vec(StorePipelineWidth, Bool()))
  val lselvec0 = PriorityEncoderOH(loadWbSelVec)
  val lselvec1 = PriorityEncoderOH(loadWbSelVec & (~lselvec0).asUInt)
  loadWbSel(0) := OHToUInt(lselvec0)
  loadWbSelV(0) := lselvec0.orR
  loadWbSel(1) := OHToUInt(lselvec1)
  loadWbSelV(1) := lselvec1.orR
  (0 until StorePipelineWidth).map(i => {
    // data select
    val rdata = dataModule.io.rdata(loadWbSel(i)).data
    val func = uop(loadWbSel(i)).ctrl.fuOpType
    val raddr = dataModule.io.rdata(loadWbSel(i)).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
    val rdataPartialLoad = LookupTree(func, List(
      LSUOpType.lb  -> SignExt(rdataSel(7, 0) , XLEN),
      LSUOpType.lh  -> SignExt(rdataSel(15, 0), XLEN),
      LSUOpType.lw  -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.ld  -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.lbu -> ZeroExt(rdataSel(7, 0) , XLEN),
      LSUOpType.lhu -> ZeroExt(rdataSel(15, 0), XLEN),
      LSUOpType.lwu -> ZeroExt(rdataSel(31, 0), XLEN),
      LSUOpType.flw -> boxF32ToF64(rdataSel(31, 0))
    ))
    io.ldout(i).bits.uop := uop(loadWbSel(i))
    io.ldout(i).bits.uop.cf.exceptionVec := dataModule.io.rdata(loadWbSel(i)).exception.asBools
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.brUpdate := DontCare
    io.ldout(i).bits.debug.isMMIO := dataModule.io.rdata(loadWbSel(i)).mmio
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelVec(loadWbSel(i)) && loadWbSelV(i)
    when(io.ldout(i).fire()) {
      writebacked(loadWbSel(i)) := true.B
      XSInfo("load miss write to cdb roqidx %d lqidx %d pc 0x%x paddr %x data %x mmio %x\n",
        io.ldout(i).bits.uop.roqIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        dataModule.io.rdata(loadWbSel(i)).paddr,
        dataModule.io.rdata(loadWbSel(i)).data,
        dataModule.io.rdata(loadWbSel(i)).mmio
      )
    }
  })
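  // Selection example for the two writeback ports above: if loadWbSelVec = b0110
  // (entries 1 and 2 are ready), PriorityEncoderOH gives lselvec0 = b0010, and
  // masking that bit off leaves lselvec1 = PriorityEncoderOH(b0100) = b0100, so
  // port 0 writes back entry 1 and port 1 writes back entry 2 in the same cycle.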
  /**
    * Load commits
    *
    * When a load commits, mark it as !allocated and move deqPtrExt forward.
    */
  (0 until CommitWidth).map(i => {
    when(loadCommit(i)) {
      allocated(mcommitIdx(i)) := false.B
      XSDebug("load commit %d: idx %d %x\n", i.U, mcommitIdx(i), uop(mcommitIdx(i)).cf.pc)
    }
  })

  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }
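  // getFirstOne implements circular priority starting at a queue pointer. For
  // example, with 8 entries and startMask = UIntToMask(5, 8) = b00011111:
  // mask = b01000110 gives highBits = mask & ~startMask = b01000000, so entry 6
  // wins; mask = b00000110 gives empty highBits, so the search wraps around and
  // picks entry 1, the first set bit of the full mask.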
  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).roqIdx, uop(1).roqIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).roqIdx, uop(j).roqIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }

  /**
    * Memory violation detection
    *
    * When a store writes back, it searches LoadQueue for younger load instructions
    * with the same physical address. They loaded wrong data and need re-execution.
    *
    * Cycle 0: Store Writeback
    *   Generate match vector for store address with rangeMask(stPtr, enqPtr).
    *   Besides, load instructions in LoadUnit_S1 and S2 are also checked.
    * Cycle 1: Redirect Generation
    *   There are three possible types of violations. Choose the oldest load.
    *   Set io.rollback according to the detected violation.
    */
  io.load_s1 := DontCare
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)

    // check if a load already in lq needs to be rolled back
    val lqViolationVec = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      val addrMatch = allocated(j) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === dataModule.io.rdata(j).paddr(PAddrBits - 1, 3)
      val entryNeedCheck = toEnqPtrMask(j) && addrMatch && (datavalid(j) || listening(j) || miss(j))
      // TODO: update refilled data
      val violationVec = (0 until 8).map(k => dataModule.io.rdata(j).mask(k) && io.storeIn(i).bits.mask(k))
      Cat(violationVec).orR() && entryNeedCheck
    })))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when load and store write back to roq in the same cycle, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
        isAfter(io.loadIn(j).bits.uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR()
    val wbViolationUop = getOldestInTwo(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits.uop))))
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for load in l1
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.load_s1(j).valid && // l1 valid
        isAfter(io.load_s1(j).uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR()
    val l1ViolationUop = getOldestInTwo(l1ViolationVec, RegNext(VecInit(io.load_s1.map(_.uop))))
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    val rollbackValidVec = Seq(lqViolation, wbViolation, l1Violation)
    val rollbackUopVec = Seq(lqViolationUop, wbViolationUop, l1ViolationUop)

    val mask = getAfterMask(rollbackValidVec, rollbackUopVec)
    val oneAfterZero = mask(1)(0)
    val rollbackUop = Mux(oneAfterZero && mask(2)(0),
      rollbackUopVec(0),
      Mux(!oneAfterZero && mask(2)(1), rollbackUopVec(1), rollbackUopVec(2)))

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, l1ViolationUop.roqIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, lqViolationUop.roqIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, wbViolationUop.roqIdx.asUInt
    )

    (RegNext(io.storeIn(i).valid) && Cat(rollbackValidVec).orR, rollbackUop)
  }

  // rollback check
  val rollback = Wire(Vec(StorePipelineWidth, Valid(new MicroOp)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollback(i).valid := detectedRollback._1
    rollback(i).bits := detectedRollback._2
  }

  def rollbackSel(a: Valid[MicroOp], b: Valid[MicroOp]): ValidIO[MicroOp] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.roqIdx, b.bits.roqIdx), b, a), // a,b both valid, sel oldest
        a // sel a
      ),
      b // sel b
    )
  }

  val rollbackSelected = ParallelOperation(rollback, rollbackSel)
  val lastCycleRedirect = RegNext(io.brqRedirect)

  // Note that we use roqIdx - 1.U to flush the load instruction itself.
  // Thus, here if last cycle's roqIdx equals this cycle's roqIdx, it still triggers the redirect.
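  // The selected rollback is suppressed when last cycle's redirect already
  // covers it, i.e. the rollback target is strictly younger than last cycle's
  // redirect point (so that redirect flushes this load anyway), or when last
  // cycle's redirect was unconditional (a full-pipeline flush).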
  io.rollback.valid := rollbackSelected.valid &&
    (!lastCycleRedirect.valid || !isAfter(rollbackSelected.bits.roqIdx, lastCycleRedirect.bits.roqIdx)) &&
    !(lastCycleRedirect.valid && lastCycleRedirect.bits.isUnconditional())

  io.rollback.bits.roqIdx := rollbackSelected.bits.roqIdx
  io.rollback.bits.level := RedirectLevel.flush
  io.rollback.bits.interrupt := DontCare
  io.rollback.bits.pc := DontCare
  io.rollback.bits.target := rollbackSelected.bits.cf.pc
  io.rollback.bits.brTag := rollbackSelected.bits.brTag

  when(io.rollback.valid) {
    XSDebug("Mem rollback: pc %x roqidx %d\n", io.rollback.bits.pc, io.rollback.bits.roqIdx.asUInt)
  }

  /**
    * Memory mapped IO / other uncached operations
    */
  io.uncache.req.valid := pending(deqPtr) && allocated(deqPtr) &&
    io.commits.info(0).commitType === CommitType.LOAD &&
    io.roqDeqPtr === uop(deqPtr).roqIdx &&
    !io.commits.isWalk

  io.uncache.req.bits.cmd := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.rdata(deqPtr).paddr
  io.uncache.req.bits.data := dataModule.io.rdata(deqPtr).data
  io.uncache.req.bits.mask := dataModule.io.rdata(deqPtr).mask

  io.uncache.req.bits.meta.id := DontCare
  io.uncache.req.bits.meta.vaddr := DontCare
  io.uncache.req.bits.meta.paddr := dataModule.io.rdata(deqPtr).paddr
  io.uncache.req.bits.meta.uop := uop(deqPtr)
  io.uncache.req.bits.meta.mmio := true.B
  io.uncache.req.bits.meta.tlb_miss := false.B
  io.uncache.req.bits.meta.mask := dataModule.io.rdata(deqPtr).mask
  io.uncache.req.bits.meta.replay := false.B

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B

    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B

    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }

  // Read vaddr for mem exception
  io.exceptionAddr.vaddr := dataModule.io.rdata(io.exceptionAddr.lsIdx.lqIdx.value).vaddr

  // misprediction recovery / exception redirect
  // invalidate lq entries using roqIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect) && allocated(i) && !commited(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
    * update pointers
    */
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  // when io.brqRedirect.valid, we don't allow enqueue even though it may fire.
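  // Pointer recovery sketch: enqueue is blocked while a redirect is in flight,
  // and one cycle after the redirect every enqPtrExt copy steps back by the
  // number of cancelled entries. E.g. if 3 entries are flushed, enqPtrExt(k)
  // becomes enqPtrExt(k) - 3 for all k, preserving RenameWidth consecutive slots.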
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid, PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect.valid) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }

  val commitCount = PopCount(loadCommit)
  deqPtrExt := deqPtrExt + commitCount

  val lastLastCycleRedirect = RegNext(lastCycleRedirect.valid)
  val trueValidCounter = distanceBetween(enqPtrExt(0), deqPtrExt)
  validCounter := Mux(lastLastCycleRedirect,
    trueValidCounter,
    validCounter + enqNumber - commitCount
  )

  allowEnqueue := Mux(io.brqRedirect.valid,
    false.B,
    Mux(lastLastCycleRedirect,
      trueValidCounter <= (LoadQueueSize - RenameWidth).U,
      validCounter + enqNumber <= (LoadQueueSize - RenameWidth).U
    )
  )

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.rdata(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && commited(i), "c")
    PrintFlag(allocated(i) && miss(i), "m")
    PrintFlag(allocated(i) && listening(i), "l")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == LoadQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}