package xiangshan.mem

import chisel3._
import chisel3.util._
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheWordIO, DCacheLineIO, TlbRequestIO, MemoryOpConstants}
import xiangshan.backend.LSUOpType
import xiangshan.backend.roq.RoqPtr


class SqPtr extends CircularQueuePtr(SqPtr.StoreQueueSize) { }

object SqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): SqPtr = {
    val ptr = Wire(new SqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}

// Store Queue
class StoreQueue extends XSModule with HasDCacheParameters with HasCircularQueuePtrHelper {
  val io = IO(new Bundle() {
    val enq = new Bundle() {
      val canAccept = Output(Bool())
      val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
      val resp = Vec(RenameWidth, Output(new SqPtr))
    }
    val brqRedirect = Input(Valid(new Redirect))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val sbuffer = Vec(StorePipelineWidth, Decoupled(new DCacheWordReq))
    val mmioStout = DecoupledIO(new ExuOutput) // write back uncached store
    val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val commits = Flipped(Vec(CommitWidth, Valid(new RoqCommit)))
    val uncache = new DCacheWordIO
    val roqDeqPtr = Input(new RoqPtr)
    // val refill = Flipped(Valid(new DCacheLineReq ))
    val oldestStore = Output(Valid(new RoqPtr))
    val exceptionAddr = new ExceptionAddrIO
  })

  val uop = Reg(Vec(StoreQueueSize, new MicroOp))
  // val data = Reg(Vec(StoreQueueSize, new LsqEntry))
  val dataModule = Module(new LSQueueData(StoreQueueSize, StorePipelineWidth))
  dataModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // sq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // non-mmio data is valid
  val writebacked = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // inst has been written back to the CDB
  val commited = Reg(Vec(StoreQueueSize, Bool())) // inst has been committed by the ROQ
  val pending = Reg(Vec(StoreQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the head (deq end) of the ROQ

  val enqPtrExt = RegInit(0.U.asTypeOf(new SqPtr))
  val deqPtrExt = RegInit(0.U.asTypeOf(new SqPtr))
  val enqPtr = enqPtrExt.value
  val deqPtr = deqPtrExt.value
  val sameFlag = enqPtrExt.flag === deqPtrExt.flag
  val isEmpty = enqPtr === deqPtr && sameFlag
  val isFull = enqPtr === deqPtr && !sameFlag
  val allowIn = !isFull

  val storeCommit = (0 until CommitWidth).map(i => io.commits(i).valid && !io.commits(i).bits.isWalk && io.commits(i).bits.uop.ctrl.commitType === CommitType.STORE)
  val mcommitIdx = (0 until CommitWidth).map(i => io.commits(i).bits.uop.sqIdx.value)

  // masks of entries below deqPtr / enqPtr, used for wrap-around searches
  val tailMask = (((1.U((StoreQueueSize + 1).W)) << deqPtr).asUInt - 1.U)(StoreQueueSize - 1, 0)
  val headMask = (((1.U((StoreQueueSize + 1).W)) << enqPtr).asUInt - 1.U)(StoreQueueSize - 1, 0)
  val enqDeqMask1 = tailMask ^ headMask
  val enqDeqMask = Mux(sameFlag, enqDeqMask1, ~enqDeqMask1)

  // Enqueue at dispatch
  val validEntries = distanceBetween(enqPtrExt, deqPtrExt)
  val firedDispatch = io.enq.req.map(_.valid)
  io.enq.canAccept := validEntries <= (StoreQueueSize - RenameWidth).U
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(firedDispatch))}\n")
  for (i <- 0 until RenameWidth) {
    val offset = if (i == 0) 0.U else PopCount((0 until i).map(firedDispatch(_)))
    val sqIdx = enqPtrExt + offset
    val index = sqIdx.value
    when(io.enq.req(i).valid) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      commited(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := sqIdx

    XSError(!io.enq.canAccept && io.enq.req(i).valid, "should not be valid when not ready\n")
  }

  when(Cat(firedDispatch).orR) {
    enqPtrExt := enqPtrExt + PopCount(firedDispatch)
    XSInfo("dispatched %d insts to sq\n", PopCount(firedDispatch))
  }

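  // Worked example of the enqueue offsets above (assuming RenameWidth = 4):
  // if firedDispatch = (1, 0, 1, 1), the per-slot offsets are 0, 1, 1, 2, so the three
  // valid uops are written to enqPtrExt + 0, + 1 and + 2, and enqPtrExt then advances
  // by PopCount(firedDispatch) = 3.
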
  // writeback store
  (0 until StorePipelineWidth).map(i => {
    dataModule.io.wb(i).wen := false.B
    when(io.storeIn(i).fire()) {
      val stWbIndex = io.storeIn(i).bits.uop.sqIdx.value
      val hasException = io.storeIn(i).bits.uop.cf.exceptionVec.asUInt.orR
      val hasWritebacked = !io.storeIn(i).bits.mmio || hasException
      datavalid(stWbIndex) := hasWritebacked
      writebacked(stWbIndex) := hasWritebacked
      pending(stWbIndex) := !hasWritebacked // mmio stores stay pending until they reach the ROQ head

      val storeWbData = Wire(new LsqEntry)
      storeWbData := DontCare
      storeWbData.paddr := io.storeIn(i).bits.paddr
      storeWbData.vaddr := io.storeIn(i).bits.vaddr
      storeWbData.mask := io.storeIn(i).bits.mask
      storeWbData.data := io.storeIn(i).bits.data
      storeWbData.mmio := io.storeIn(i).bits.mmio
      storeWbData.exception := io.storeIn(i).bits.uop.cf.exceptionVec.asUInt

      dataModule.io.wbWrite(i, stWbIndex, storeWbData)
      dataModule.io.wb(i).wen := true.B

      XSInfo("store write to sq idx %d pc 0x%x vaddr %x paddr %x data %x mmio %x roll %x exc %x\n",
        io.storeIn(i).bits.uop.sqIdx.value,
        io.storeIn(i).bits.uop.cf.pc,
        io.storeIn(i).bits.vaddr,
        io.storeIn(i).bits.paddr,
        io.storeIn(i).bits.data,
        io.storeIn(i).bits.mmio,
        io.storeIn(i).bits.rollback,
        io.storeIn(i).bits.uop.cf.exceptionVec.asUInt
      )
    }
  })

  // find the first set bit at or above the position covered by startMask;
  // if there is none, wrap around and take the lowest set bit instead
  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }

  // same as getFirstOne, but returns an SqPtr whose flag toggles when the search wraps around
  def getFirstOneWithFlag(mask: Vec[Bool], startMask: UInt, startFlag: Bool) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    val changeDirection = !highBitsUint.orR()
    val index = PriorityEncoder(Mux(!changeDirection, highBitsUint, mask.asUInt))
    SqPtr(startFlag ^ changeDirection, index)
  }

  def selectFirstTwo(valid: Vec[Bool], startMask: UInt) = {
    val selVec = Wire(Vec(2, UInt(log2Up(StoreQueueSize).W)))
    val selValid = Wire(Vec(2, Bool()))
    selVec(0) := getFirstOne(valid, startMask)
    val firstSelMask = UIntToOH(selVec(0))
    val secondSelVec = VecInit((0 until valid.length).map(i => valid(i) && !firstSelMask(i)))
    selVec(1) := getFirstOne(secondSelVec, startMask)
    selValid(0) := Cat(valid).orR
    selValid(1) := Cat(secondSelVec).orR
    (selValid, selVec)
  }

  def selectFirstTwoRoughly(valid: Vec[Bool]) = {
    // TODO: do not select according to seq, just select 2 valid bits randomly
    val firstSelVec = valid
    // notFirstVec(i): some entry before i is already valid, so i cannot be the first selection
    val notFirstVec = Wire(Vec(valid.length, Bool()))
    (0 until valid.length).map(i =>
      notFirstVec(i) := (if (i != 0) { valid(i - 1) || notFirstVec(i - 1) } else { false.B })
    )
    val secondSelVec = VecInit((0 until valid.length).map(i => valid(i) && notFirstVec(i)))

    val selVec = Wire(Vec(2, UInt(log2Up(valid.length).W)))
    val selValid = Wire(Vec(2, Bool()))
    selVec(0) := PriorityEncoder(firstSelVec)
    selVec(1) := PriorityEncoder(secondSelVec)
    selValid(0) := Cat(firstSelVec).orR
    selValid(1) := Cat(secondSelVec).orR
    (selValid, selVec)
  }

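  // Worked example of getFirstOne (assuming StoreQueueSize = 8 and deqPtr = 3, so
  // startMask = tailMask = 0b00000111): with mask = 0b00100110 the candidates not covered
  // by startMask are 0b00100000, so index 5 is chosen; with mask = 0b00000110 no candidate
  // remains at or above deqPtr, so the search wraps and index 1 is chosen instead.
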
  // select the last written-back store: the entry just before the first
  // not-yet-writebacked one, searching upwards from deqPtr
  val validStoreVec = VecInit((0 until StoreQueueSize).map(i => !(allocated(i) && datavalid(i))))
  val storeNotValid = SqPtr(false.B, getFirstOne(validStoreVec, tailMask))
  val storeValidIndex = (storeNotValid - 1.U).value
  io.oldestStore.valid := allocated(deqPtrExt.value) && datavalid(deqPtrExt.value) && !commited(storeValidIndex)
  io.oldestStore.bits := uop(storeValidIndex).roqIdx

  // writeback finished mmio store
  io.mmioStout.bits.uop := uop(deqPtr)
  io.mmioStout.bits.uop.sqIdx := deqPtrExt
  io.mmioStout.bits.uop.cf.exceptionVec := dataModule.io.rdata(deqPtr).exception.asBools
  io.mmioStout.bits.data := dataModule.io.rdata(deqPtr).data
  io.mmioStout.bits.redirectValid := false.B
  io.mmioStout.bits.redirect := DontCare
  io.mmioStout.bits.brUpdate := DontCare
  io.mmioStout.bits.debug.isMMIO := true.B
  io.mmioStout.bits.fflags := DontCare
  io.mmioStout.valid := allocated(deqPtr) && datavalid(deqPtr) && !writebacked(deqPtr) // finished mmio store
  when(io.mmioStout.fire()) {
    writebacked(deqPtr) := true.B
    allocated(deqPtr) := false.B // potential opt: move deqPtr immediately
  }

  // remove retired insts from sq, add retired stores to sbuffer

  // move tailPtr
  // TailPtr slow recovery: recycle bubbles in the store queue
  // allocatedMask: deqPtr can advance to the next 1-bit
  val allocatedMask = VecInit((0 until StoreQueueSize).map(i => allocated(i) || !enqDeqMask(i)))
  // find the first allocated entry, starting from deqPtr
  val nextTail1 = getFirstOneWithFlag(allocatedMask, tailMask, deqPtrExt.flag)
  val nextTail = Mux(Cat(allocatedMask).orR, nextTail1, enqPtrExt)
  deqPtrExt := nextTail

  // TailPtr fast recovery
  // val tailRecycle = VecInit(List(
  //   io.uncache.resp.fire() || io.sbuffer(0).fire(),
  //   io.sbuffer(1).fire()
  // ))

  when(io.sbuffer(0).fire()){
    deqPtrExt := deqPtrExt + Mux(io.sbuffer(1).fire(), 2.U, 1.U)
  }

  // load forward query
  // check over all sq entries and forward data from the first matched store
  (0 until LoadPipelineWidth).map(i => {
    io.forward(i).forwardMask := 0.U(8.W).asBools
    io.forward(i).forwardData := DontCare

    // Compare deqPtr and forward.sqIdx; there are two cases:
    // (1) if they have the same flag, we need to check range(tail, sqIdx)
    // (2) if they have different flags, we need to check range(tail, StoreQueueSize) and range(0, sqIdx)
    // Forward1: Mux(same_flag, range(tail, sqIdx), range(tail, StoreQueueSize))
    // Forward2: Mux(same_flag, 0.U,                range(0, sqIdx)            )
    // i.e. forward1 covers the entries with the same flag bit as deqPtr, and forward2 the rest

    val differentFlag = deqPtrExt.flag =/= io.forward(i).sqIdx.flag
    val forwardMask = ((1.U((StoreQueueSize + 1).W)) << io.forward(i).sqIdx.value).asUInt - 1.U
    val storeWritebackedVec = WireInit(VecInit(Seq.fill(StoreQueueSize)(false.B)))
    for (j <- 0 until StoreQueueSize) {
      storeWritebackedVec(j) := datavalid(j) && allocated(j) // all datavalid terms need to be checked
    }
    val needForward1 = Mux(differentFlag, ~tailMask, tailMask ^ forwardMask) & storeWritebackedVec.asUInt
    val needForward2 = Mux(differentFlag, forwardMask, 0.U(StoreQueueSize.W)) & storeWritebackedVec.asUInt

    XSDebug("" + i + " f1 %b f2 %b sqIdx %d pa %x\n", needForward1, needForward2, io.forward(i).sqIdx.asUInt, io.forward(i).paddr)

    // do real fwd query
    dataModule.io.forwardQuery(
      channel = i,
      paddr = io.forward(i).paddr,
      needForward1 = needForward1,
      needForward2 = needForward2
    )

    io.forward(i).forwardMask := dataModule.io.forward(i).forwardMask
    io.forward(i).forwardData := dataModule.io.forward(i).forwardData
  })

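  // Worked example of the forward ranges above (assuming StoreQueueSize = 8): with
  // deqPtr = 5 and forward.sqIdx.value = 2 on a different flag (the queue has wrapped),
  // tailMask = 0b00011111 and forwardMask = 0b00000011, so needForward1 = ~tailMask covers
  // entries 5..7 and needForward2 = forwardMask covers entries 0..1; together, masked by
  // storeWritebackedVec, they select exactly the stores older than the querying load.
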
  // When a store is committed, mark it as committed (it will no longer be affected by redirects)
  (0 until CommitWidth).map(i => {
    when(storeCommit(i)) {
      commited(mcommitIdx(i)) := true.B
      XSDebug("store commit %d: idx %d %x\n", i.U, mcommitIdx(i), uop(mcommitIdx(i)).cf.pc)
    }
  })

  // write committed, non-mmio stores to the sbuffer, up to two per cycle
  (0 until 2).map(i => {
    val ptr = (deqPtrExt + i.U).value
    val mmio = dataModule.io.rdata(ptr).mmio
    io.sbuffer(i).valid := allocated(ptr) && commited(ptr) && !mmio
    io.sbuffer(i).bits.cmd := MemoryOpConstants.M_XWR
    io.sbuffer(i).bits.addr := dataModule.io.rdata(ptr).paddr
    io.sbuffer(i).bits.data := dataModule.io.rdata(ptr).data
    io.sbuffer(i).bits.mask := dataModule.io.rdata(ptr).mask
    io.sbuffer(i).bits.meta := DontCare
    io.sbuffer(i).bits.meta.tlb_miss := false.B
    io.sbuffer(i).bits.meta.uop := DontCare
    io.sbuffer(i).bits.meta.mmio := mmio
    io.sbuffer(i).bits.meta.mask := dataModule.io.rdata(ptr).mask

    when(io.sbuffer(i).fire()) {
      allocated(ptr) := false.B
      XSDebug("sbuffer " + i + " fire: ptr %d\n", ptr)
    }
  })

  // Memory mapped IO / other uncached operations

  // setup misc mem access req
  // mask / paddr / data can be read from sq.data
  val commitType = io.commits(0).bits.uop.ctrl.commitType
  io.uncache.req.valid := pending(deqPtr) && allocated(deqPtr) &&
    commitType === CommitType.STORE &&
    io.roqDeqPtr === uop(deqPtr).roqIdx &&
    !io.commits(0).bits.isWalk

  io.uncache.req.bits.cmd := MemoryOpConstants.M_XWR
  io.uncache.req.bits.addr := dataModule.io.rdata(deqPtr).paddr
  io.uncache.req.bits.data := dataModule.io.rdata(deqPtr).data
  io.uncache.req.bits.mask := dataModule.io.rdata(deqPtr).mask

  io.uncache.req.bits.meta.id := DontCare // TODO: // FIXME
  io.uncache.req.bits.meta.vaddr := DontCare
  io.uncache.req.bits.meta.paddr := dataModule.io.rdata(deqPtr).paddr
  io.uncache.req.bits.meta.uop := uop(deqPtr)
  io.uncache.req.bits.meta.mmio := true.B // dataModule.io.rdata(deqPtr).mmio
  io.uncache.req.bits.meta.tlb_miss := false.B
  io.uncache.req.bits.meta.mask := dataModule.io.rdata(deqPtr).mask
  io.uncache.req.bits.meta.replay := false.B

  io.uncache.resp.ready := true.B

  when(io.uncache.req.fire()){
    pending(deqPtr) := false.B
  }

  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B // will be written back to the CDB in the next cycle
    // TODO: write back exception info
  }

  when(io.uncache.req.fire()){
    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

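  // Uncached (MMIO) store flow implemented above: the store is marked pending at store
  // writeback, waits until it reaches the head of the ROQ (io.roqDeqPtr === uop(deqPtr).roqIdx)
  // with a STORE at the commit port, then issues a single io.uncache request; the response
  // sets datavalid, and the store is finally written back to the CDB through io.mmioStout.
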
  // Read vaddr for mem exception
  io.exceptionAddr.vaddr := dataModule.io.rdata(io.exceptionAddr.lsIdx.sqIdx.value).vaddr

  // misprediction recovery / exception redirect
  // invalidate sq entries using roqIdx
  val needCancel = Wire(Vec(StoreQueueSize, Bool()))
  for (i <- 0 until StoreQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect) && allocated(i) && !commited(i)
    when(needCancel(i)) {
      // when(io.brqRedirect.bits.isReplay){
      //   datavalid(i) := false.B
      //   writebacked(i) := false.B
      //   pending(i) := false.B
      // }.otherwise{
        allocated(i) := false.B
      // }
    }
  }
  when (io.brqRedirect.valid && io.brqRedirect.bits.isMisPred) {
    enqPtrExt := enqPtrExt - PopCount(needCancel)
  }

  // debug info
  XSDebug("head %d:%d tail %d:%d\n", enqPtrExt.flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until StoreQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.rdata(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && commited(i), "c")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == StoreQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}