/***************************************************************************************
* Copyright (c) 2024 Beijing Institute of Open Source Chip (BOSC)
* Copyright (c) 2020-2024 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chisel3._
import chisel3.util._
import difftest._
import difftest.common.DifftestMem
import org.chipsalliance.cde.config.Parameters
import utility._
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheLineIO, DCacheWordIO, MemoryOpConstants}
import xiangshan.cache.{CMOReq, CMOResp}
import xiangshan.backend._
import xiangshan.backend.rob.{RobLsqIO, RobPtr}
import xiangshan.backend.Bundles.{DynInst, MemExuOutput}
import xiangshan.backend.decode.isa.bitfield.{Riscv32BitInst, XSInstBitFields}
import xiangshan.backend.fu.FuConfig._
import xiangshan.backend.fu.FuType
import xiangshan.ExceptionNO._

class SqPtr(implicit p: Parameters) extends CircularQueuePtr[SqPtr](
  p => p(XSCoreParamsKey).StoreQueueSize
){
}

object SqPtr {
  def apply(f: Bool, v: UInt)(implicit p: Parameters): SqPtr = {
    val ptr = Wire(new SqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}

class SqEnqIO(implicit p: Parameters) extends MemBlockBundle {
  val canAccept = Output(Bool())
  val lqCanAccept = Input(Bool())
  val needAlloc = Vec(LSQEnqWidth, Input(Bool()))
  val req = Vec(LSQEnqWidth, Flipped(ValidIO(new DynInst)))
  val resp = Vec(LSQEnqWidth, Output(new SqPtr))
}

class DataBufferEntry (implicit p: Parameters) extends DCacheBundle {
  val addr = UInt(PAddrBits.W)
  val vaddr = UInt(VAddrBits.W)
  val data = UInt(VLEN.W)
  val mask = UInt((VLEN/8).W)
  val wline = Bool()
  val sqPtr = new SqPtr
  val prefetch = Bool()
  val vecValid = Bool()
  val sqNeedDeq = Bool()
}

class StoreExceptionBuffer(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper {
  // The 1st StorePipelineWidth ports: sta exceptions generated at s1, except for af
  // The 2nd StorePipelineWidth ports: sta af (access fault) generated at s2
  // The following VecStorePipelineWidth ports: vector store exceptions
  // The last port: non-data error generated in the SoC
  val enqPortNum = StorePipelineWidth * 2 + VecStorePipelineWidth + 1

  val io = IO(new Bundle() {
    val redirect = Flipped(ValidIO(new Redirect))
    val storeAddrIn = Vec(enqPortNum, Flipped(ValidIO(new LsPipelineBundle())))
    val exceptionAddr = new ExceptionAddrIO
  })

  val req_valid = RegInit(false.B)
  val req = Reg(new LsPipelineBundle())

  // enqueue
  // S1:
  val s1_req = VecInit(io.storeAddrIn.map(_.bits))
  val s1_valid = VecInit(io.storeAddrIn.map(x =>
    x.valid && !x.bits.uop.robIdx.needFlush(io.redirect) && ExceptionNO.selectByFu(x.bits.uop.exceptionVec, StaCfg).asUInt.orR
  ))
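
  // A note on the age ordering used throughout this buffer (a mental model restating the
  // logic below, not new hardware): request a is older than request b iff a.robIdx is
  // earlier in program order, or the robIdx is equal (same instruction) and a.uopIdx is
  // smaller. In (hypothetical) predicate form:
  //   def olderThan(a: LsPipelineBundle, b: LsPipelineBundle): Bool =
  //     isAfter(b.uop.robIdx, a.uop.robIdx) ||
  //     (a.uop.robIdx === b.uop.robIdx && a.uop.uopIdx < b.uop.uopIdx)
  // selectOldest below reduces the enqueue ports pairwise with this ordering, and the
  // held `req` is only replaced by a strictly older incoming request.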

  // S2: delay 1 cycle
  val s2_req = (0 until enqPortNum).map(i =>
    RegEnable(s1_req(i), s1_valid(i)))
  val s2_valid = (0 until enqPortNum).map(i =>
    RegNext(s1_valid(i)) && !s2_req(i).uop.robIdx.needFlush(io.redirect)
  )

  val s2_enqueue = Wire(Vec(enqPortNum, Bool()))
  for (w <- 0 until enqPortNum) {
    s2_enqueue(w) := s2_valid(w)
  }

  when (req_valid && req.uop.robIdx.needFlush(io.redirect)) {
    req_valid := s2_enqueue.asUInt.orR
  }.elsewhen (s2_enqueue.asUInt.orR) {
    req_valid := true.B
  }

  def selectOldest[T <: LsPipelineBundle](valid: Seq[Bool], bits: Seq[T]): (Seq[Bool], Seq[T]) = {
    assert(valid.length == bits.length)
    if (valid.length == 0 || valid.length == 1) {
      (valid, bits)
    } else if (valid.length == 2) {
      val res = Seq.fill(2)(Wire(Valid(chiselTypeOf(bits(0)))))
      for (i <- res.indices) {
        res(i).valid := valid(i)
        res(i).bits := bits(i)
      }
      val oldest = Mux(valid(0) && valid(1),
        Mux(isAfter(bits(0).uop.robIdx, bits(1).uop.robIdx) ||
          (bits(0).uop.robIdx === bits(1).uop.robIdx && bits(0).uop.uopIdx > bits(1).uop.uopIdx), res(1), res(0)),
        Mux(valid(0) && !valid(1), res(0), res(1)))
      (Seq(oldest.valid), Seq(oldest.bits))
    } else {
      val left = selectOldest(valid.take(valid.length / 2), bits.take(bits.length / 2))
      val right = selectOldest(valid.takeRight(valid.length - (valid.length / 2)), bits.takeRight(bits.length - (bits.length / 2)))
      selectOldest(left._1 ++ right._1, left._2 ++ right._2)
    }
  }

  val reqSel = selectOldest(s2_enqueue, s2_req)

  when (req_valid) {
    req := Mux(
      reqSel._1(0) && (isAfter(req.uop.robIdx, reqSel._2(0).uop.robIdx) || (isNotBefore(req.uop.robIdx, reqSel._2(0).uop.robIdx) && req.uop.uopIdx > reqSel._2(0).uop.uopIdx)),
      reqSel._2(0),
      req)
  } .elsewhen (s2_enqueue.asUInt.orR) {
    req := reqSel._2(0)
  }

  io.exceptionAddr.vaddr := req.fullva
  io.exceptionAddr.vaNeedExt := req.vaNeedExt
  io.exceptionAddr.isHyper := req.isHyper
  io.exceptionAddr.gpaddr := req.gpaddr
  io.exceptionAddr.vstart := req.uop.vpu.vstart
  io.exceptionAddr.vl := req.uop.vpu.vl
  io.exceptionAddr.isForVSnonLeafPTE := req.isForVSnonLeafPTE

}

// Store Queue
class StoreQueue(implicit p: Parameters) extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasPerfEvents
  with HasVLSUParameters {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(hartIdLen.W))
    val enq = new SqEnqIO
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val vecFeedback = Vec(VecLoadPipelineWidth, Flipped(ValidIO(new FeedbackToLsqIO)))
    val storeAddrIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // store addr, data is not included
    val storeAddrInRe = Vec(StorePipelineWidth, Input(new LsPipelineBundle())) // store addr re-entry: mmio and exception info arrive one cycle later
    val storeDataIn = Vec(StorePipelineWidth, Flipped(Valid(new MemExuOutput(isVector = true)))) // store data, sent to sq from rs
    val storeMaskIn = Vec(StorePipelineWidth, Flipped(Valid(new StoreMaskBundle))) // store mask, sent to sq from rs
    val sbuffer = Vec(EnsbufferWidth, Decoupled(new DCacheWordReqWithVaddrAndPfFlag)) // write committed store to sbuffer
    val sbufferVecDifftestInfo = Vec(EnsbufferWidth, Decoupled(new DynInst)) // info needed by vector store difftest, written alongside committed stores to sbuffer
    val uncacheOutstanding = Input(Bool())
    val cmoOpReq = DecoupledIO(new CMOReq)
    val cmoOpResp = Flipped(DecoupledIO(new CMOResp))
    val mmioStout = DecoupledIO(new MemExuOutput) // writeback uncached store
    val vecmmioStout = DecoupledIO(new MemExuOutput(isVector = true))
    val forward = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO))
    // TODO: scommit is only for scalar store
    val rob = Flipped(new RobLsqIO)
    val uncache = new UncacheWordIO
    // val refill = Flipped(Valid(new DCacheLineReq ))
    val exceptionAddr = new ExceptionAddrIO
    val flushSbuffer = new SbufferFlushBundle
    val sqEmpty = Output(Bool())
    val stAddrReadySqPtr = Output(new SqPtr)
    val stAddrReadyVec = Output(Vec(StoreQueueSize, Bool()))
    val stDataReadySqPtr = Output(new SqPtr)
    val stDataReadyVec = Output(Vec(StoreQueueSize, Bool()))
    val stIssuePtr = Output(new SqPtr)
    val sqDeqPtr = Output(new SqPtr)
    val sqFull = Output(Bool())
    val sqCancelCnt = Output(UInt(log2Up(StoreQueueSize + 1).W))
    val sqDeq = Output(UInt(log2Ceil(EnsbufferWidth + 1).W))
    val force_write = Output(Bool())
    val maControl = Flipped(new StoreMaBufToSqControlIO)
  })

  println("StoreQueue: size:" + StoreQueueSize)

  // data modules
  val uop = Reg(Vec(StoreQueueSize, new DynInst))
  // val data = Reg(Vec(StoreQueueSize, new LsqEntry))
  val dataModule = Module(new SQDataModule(
    numEntries = StoreQueueSize,
    numRead = EnsbufferWidth,
    numWrite = StorePipelineWidth,
    numForward = LoadPipelineWidth
  ))
  dataModule.io := DontCare
  val paddrModule = Module(new SQAddrModule(
    dataWidth = PAddrBits,
    numEntries = StoreQueueSize,
    numRead = EnsbufferWidth,
    numWrite = StorePipelineWidth,
    numForward = LoadPipelineWidth
  ))
  paddrModule.io := DontCare
  val vaddrModule = Module(new SQAddrModule(
    dataWidth = VAddrBits,
    numEntries = StoreQueueSize,
    numRead = EnsbufferWidth, // sbuffer; badvaddr will be sent from exceptionBuffer
    numWrite = StorePipelineWidth,
    numForward = LoadPipelineWidth
  ))
  vaddrModule.io := DontCare
  val dataBuffer = Module(new DatamoduleResultBuffer(new DataBufferEntry))
  val difftestBuffer = if (env.EnableDifftest) Some(Module(new DatamoduleResultBuffer(new DynInst))) else None
  val exceptionBuffer = Module(new StoreExceptionBuffer)
  exceptionBuffer.io.redirect := io.brqRedirect
  exceptionBuffer.io.exceptionAddr.isStore := DontCare
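
  // Port layout of exceptionBuffer.io.storeAddrIn (enqPortNum entries), as documented
  // in StoreExceptionBuffer above:
  //   [0, StorePipelineWidth)                       : sta exceptions from store_s1
  //   [StorePipelineWidth, 2*StorePipelineWidth)    : sta access faults from store_s2
  //   [2*SPW, 2*SPW + VecStorePipelineWidth)        : vector store exceptions (below)
  //   last                                          : non-data error on mmio writeback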
  // vlsu exception!
  for (i <- 0 until VecStorePipelineWidth) {
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).valid := io.vecFeedback(i).valid && io.vecFeedback(i).bits.feedback(VecFeedbacks.FLUSH) // has exception
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits := DontCare
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.fullva := io.vecFeedback(i).bits.vaddr
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.vaNeedExt := io.vecFeedback(i).bits.vaNeedExt
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.gpaddr := io.vecFeedback(i).bits.gpaddr
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.uop.uopIdx := io.vecFeedback(i).bits.uopidx
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.uop.robIdx := io.vecFeedback(i).bits.robidx
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.uop.vpu.vstart := io.vecFeedback(i).bits.vstart
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.uop.vpu.vl := io.vecFeedback(i).bits.vl
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.isForVSnonLeafPTE := io.vecFeedback(i).bits.isForVSnonLeafPTE
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.uop.exceptionVec := io.vecFeedback(i).bits.exceptionVec
  }

  val debug_paddr = Reg(Vec(StoreQueueSize, UInt((PAddrBits).W)))
  val debug_vaddr = Reg(Vec(StoreQueueSize, UInt((VAddrBits).W)))
  val debug_data = Reg(Vec(StoreQueueSize, UInt((XLEN).W)))

  // state & misc
  val allocated = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // sq entry has been allocated
  val addrvalid = RegInit(VecInit(List.fill(StoreQueueSize)(false.B)))
  val datavalid = RegInit(VecInit(List.fill(StoreQueueSize)(false.B)))
  val allvalid = VecInit((0 until StoreQueueSize).map(i => addrvalid(i) && datavalid(i)))
  val committed = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // inst has been committed by rob
  val unaligned = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // unaligned store
  val cross16Byte = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // unaligned store crossing a 16-byte boundary
  val pending = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the end of rob
  val nc = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // nc: inst is a nc inst
  val mmio = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // mmio: inst is an mmio inst
  val atomic = RegInit(VecInit(List.fill(StoreQueueSize)(false.B)))
  val prefetch = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // need prefetch when committing this store to sbuffer?
  val isVec = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // vector store instruction
  val vecLastFlow = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // this uop is the last flow of a vector store instruction
  val vecMbCommit = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // vector store committed from merge buffer to rob
  val hasException = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // store has exception, should deq but not write sbuffer
  val waitStoreS2 = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // wait for mmio and exception result until store_s2
  // val vec_robCommit = Reg(Vec(StoreQueueSize, Bool())) // vector store committed by rob
  // val vec_secondInv = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // Vector unit-stride, second entry is invalid
  val vecExceptionFlag = RegInit(0.U.asTypeOf(Valid(new DynInst)))

  // ptr
  val enqPtrExt = RegInit(VecInit((0 until io.enq.req.length).map(_.U.asTypeOf(new SqPtr))))
  val rdataPtrExt = RegInit(VecInit((0 until EnsbufferWidth).map(_.U.asTypeOf(new SqPtr))))
  val deqPtrExt = RegInit(VecInit((0 until EnsbufferWidth).map(_.U.asTypeOf(new SqPtr))))
  val cmtPtrExt = RegInit(VecInit((0 until CommitWidth).map(_.U.asTypeOf(new SqPtr))))
  val addrReadyPtrExt = RegInit(0.U.asTypeOf(new SqPtr))
  val dataReadyPtrExt = RegInit(0.U.asTypeOf(new SqPtr))

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt(0).value
  val cmtPtr = cmtPtrExt(0).value

  val validCount = distanceBetween(enqPtrExt(0), deqPtrExt(0))
  val allowEnqueue = validCount <= (StoreQueueSize - LSQStEnqWidth).U

  val deqMask = UIntToMask(deqPtr, StoreQueueSize)
  val enqMask = UIntToMask(enqPtr, StoreQueueSize)

  val commitCount = WireInit(0.U(log2Ceil(CommitWidth + 1).W))
  val scommit = GatedRegNext(io.rob.scommit)
  val mmioReq = Wire(chiselTypeOf(io.uncache.req))
  val ncReq = Wire(chiselTypeOf(io.uncache.req))
  val ncResp = Wire(chiselTypeOf(io.uncache.resp))
  val ncDoReq = Wire(Bool())
  val ncDoResp = Wire(Bool())
  val ncReadNextTrigger = Mux(io.uncacheOutstanding, ncDoReq, ncDoResp)
  // ncDoReq is double RegNexted, as the ubuffer data write takes 3 cycles.
  // TODO lyq: eliminate this coupling by passing signals through the ubuffer
  val ncDeqTrigger = Mux(io.uncacheOutstanding, RegNext(RegNext(ncDoReq)), ncDoResp)
  val ncPtr = Mux(io.uncacheOutstanding, RegNext(RegNext(io.uncache.req.bits.id)), io.uncache.resp.bits.id)

  // store can be committed by ROB
  io.rob.mmio := DontCare
  io.rob.uop := DontCare

  // Read dataModule
  assert(EnsbufferWidth <= 2)
  // the entries at rdataPtrExtNext and rdataPtrExtNext+1 will be read from dataModule
  val rdataPtrExtNext = Wire(Vec(EnsbufferWidth, new SqPtr))
  rdataPtrExtNext := rdataPtrExt.map(i => i +
    PopCount(dataBuffer.io.enq.map(x => x.fire && x.bits.sqNeedDeq)) +
    PopCount(ncReadNextTrigger || io.mmioStout.fire || io.vecmmioStout.fire)
  )
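
  // Pointer roles (a mental model, inferred from the surrounding code):
  //   enqPtrExt   - allocation cursor, advanced at dispatch
  //   cmtPtrExt   - commit cursor, advanced when ROB commits stores
  //   rdataPtrExt - read cursor for the data modules, runs one entry ahead of deqPtrExt
  //   deqPtrExt   - dequeue cursor, advanced after sbuffer/uncache writes complete
  // Normally deqPtr <= cmtPtr <= enqPtr in circular order, though for mmio stores
  // deqPtr may run ahead of cmtPtr (see the redirect handling further below).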

  // deqPtrExtNext traces which inst is about to leave store queue
  //
  // io.sbuffer(i).fire is RegNexted, as sbuffer data write takes 2 cycles.
  // Before the data write finishes, the sbuffer is unable to provide store-to-load
  // forward data. As a workaround, the deqPtrExt and allocated flag update
  // is delayed so that a load can get the right data from the store queue.
  //
  // Modify deqPtrExtNext and io.sqDeq with care!
  val deqPtrExtNext = Wire(Vec(EnsbufferWidth, new SqPtr))
  // Only sqNeedDeq can move the ptr
  deqPtrExtNext := deqPtrExt.map(i => i +
    RegNext(PopCount(VecInit(io.sbuffer.map(x => x.fire && x.bits.sqNeedDeq)))) +
    PopCount(ncDeqTrigger || io.mmioStout.fire || io.vecmmioStout.fire)
  )

  io.sqDeq := RegNext(
    RegNext(PopCount(VecInit(io.sbuffer.map(x => x.fire && x.bits.sqNeedDeq)))) +
    PopCount(ncDeqTrigger || io.mmioStout.fire || io.vecmmioStout.fire)
  )

  assert(!RegNext(RegNext(io.sbuffer(0).fire) && (io.mmioStout.fire || io.vecmmioStout.fire)))

  for (i <- 0 until EnsbufferWidth) {
    dataModule.io.raddr(i) := rdataPtrExtNext(i).value
    paddrModule.io.raddr(i) := rdataPtrExtNext(i).value
    vaddrModule.io.raddr(i) := rdataPtrExtNext(i).value
  }

  /**
    * Enqueue at dispatch
    *
    * Currently, StoreQueue only allows enqueue when #emptyEntries > EnqWidth
    * Dynamic enqueue is based on the numLsElem of each request
    */
  io.enq.canAccept := allowEnqueue
  val canEnqueue = io.enq.req.map(_.valid)
  val enqCancel = io.enq.req.map(_.bits.robIdx.needFlush(io.brqRedirect))
  val vStoreFlow = io.enq.req.map(_.bits.numLsElem)
  val validVStoreFlow = vStoreFlow.zipWithIndex.map{case (vStoreFlowNumItem, index) => Mux(!RegNext(io.brqRedirect.valid) && canEnqueue(index), vStoreFlowNumItem, 0.U)}
  val validVStoreOffset = vStoreFlow.zip(io.enq.needAlloc).map{case (flow, needAllocItem) => Mux(needAllocItem, flow, 0.U)}
  val validVStoreOffsetRShift = 0.U +: validVStoreOffset.take(vStoreFlow.length - 1)

  val enqLowBound = io.enq.req.map(_.bits.sqIdx)
  val enqUpBound = io.enq.req.map(x => x.bits.sqIdx + x.bits.numLsElem)
  val enqCrossLoop = enqLowBound.zip(enqUpBound).map{case (low, up) => low.flag =/= up.flag}

  for(i <- 0 until StoreQueueSize) {
    val entryCanEnqSeq = (0 until io.enq.req.length).map { j =>
      val entryHitBound = Mux(
        enqCrossLoop(j),
        enqLowBound(j).value <= i.U || i.U < enqUpBound(j).value,
        enqLowBound(j).value <= i.U && i.U < enqUpBound(j).value
      )
      canEnqueue(j) && !enqCancel(j) && entryHitBound
    }

    val entryCanEnq = entryCanEnqSeq.reduce(_ || _)
    val selectBits = ParallelPriorityMux(entryCanEnqSeq, io.enq.req.map(_.bits))
    val selectUpBound = ParallelPriorityMux(entryCanEnqSeq, enqUpBound)
    when (entryCanEnq) {
      uop(i) := selectBits
      vecLastFlow(i) := Mux((i + 1).U === selectUpBound.value, selectBits.lastUop, false.B)
      allocated(i) := true.B
      datavalid(i) := false.B
      addrvalid(i) := false.B
      unaligned(i) := false.B
      cross16Byte(i) := false.B
      committed(i) := false.B
      pending(i) := false.B
      prefetch(i) := false.B
      nc(i) := false.B
      mmio(i) := false.B
      isVec(i) := FuType.isVStore(selectBits.fuType)
      vecMbCommit(i) := false.B
      hasException(i) := false.B
      waitStoreS2(i) := true.B
    }
  }

  for (i <- 0 until io.enq.req.length) {
    val sqIdx = enqPtrExt(0) + validVStoreOffsetRShift.take(i + 1).reduce(_ + _)
    val index = io.enq.req(i).bits.sqIdx
    when (canEnqueue(i) && !enqCancel(i)) {
      XSError(!io.enq.canAccept || !io.enq.lqCanAccept, s"must accept $i\n")
      XSError(index.value =/= sqIdx.value, s"must be the same entry $i\n")
    }
    io.enq.resp(i) := sqIdx
  }
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")
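
  // Worked example of the wrap-around range check above (illustrative numbers only):
  // with StoreQueueSize = 8, a 4-flow vector store enqueued at sqIdx 6 gives
  // low = 6, up = 2 with differing flags (enqCrossLoop), so entries satisfying
  // (6 <= i) || (i < 2), i.e. {6, 7, 0, 1}, are allocated. Without wrap-around
  // (low = 2, up = 6, same flag) the condition is (2 <= i) && (i < 6).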

  /**
    * Update addr/dataReadyPtr when issue from rs
    */
  // update issuePtr
  val IssuePtrMoveStride = 4
  require(IssuePtrMoveStride >= 2)

  val addrReadyLookupVec = (0 until IssuePtrMoveStride).map(addrReadyPtrExt + _.U)
  val addrReadyLookup = addrReadyLookupVec.map(ptr => allocated(ptr.value) &&
    (mmio(ptr.value) || addrvalid(ptr.value) || vecMbCommit(ptr.value))
    && ptr =/= enqPtrExt(0))
  val nextAddrReadyPtr = addrReadyPtrExt + PriorityEncoder(VecInit(addrReadyLookup.map(!_) :+ true.B))
  addrReadyPtrExt := nextAddrReadyPtr

  val stAddrReadyVecReg = Wire(Vec(StoreQueueSize, Bool()))
  (0 until StoreQueueSize).map(i => {
    stAddrReadyVecReg(i) := allocated(i) && (mmio(i) || addrvalid(i) || (isVec(i) && vecMbCommit(i)))
  })
  io.stAddrReadyVec := GatedValidRegNext(stAddrReadyVecReg)

  when (io.brqRedirect.valid) {
    addrReadyPtrExt := Mux(
      isAfter(cmtPtrExt(0), deqPtrExt(0)),
      cmtPtrExt(0),
      deqPtrExtNext(0) // for mmio insts, deqPtr may be ahead of cmtPtr
    )
  }

  io.stAddrReadySqPtr := addrReadyPtrExt

  // update
  val dataReadyLookupVec = (0 until IssuePtrMoveStride).map(dataReadyPtrExt + _.U)
  val dataReadyLookup = dataReadyLookupVec.map(ptr => allocated(ptr.value) &&
    (mmio(ptr.value) || datavalid(ptr.value) || vecMbCommit(ptr.value))
    && ptr =/= enqPtrExt(0))
  val nextDataReadyPtr = dataReadyPtrExt + PriorityEncoder(VecInit(dataReadyLookup.map(!_) :+ true.B))
  dataReadyPtrExt := nextDataReadyPtr

  val stDataReadyVecReg = Wire(Vec(StoreQueueSize, Bool()))
  (0 until StoreQueueSize).map(i => {
    stDataReadyVecReg(i) := allocated(i) && (mmio(i) || datavalid(i) || (isVec(i) && vecMbCommit(i)))
  })
  io.stDataReadyVec := GatedValidRegNext(stDataReadyVecReg)

  when (io.brqRedirect.valid) {
    dataReadyPtrExt := Mux(
      isAfter(cmtPtrExt(0), deqPtrExt(0)),
      cmtPtrExt(0),
      deqPtrExtNext(0) // for mmio insts, deqPtr may be ahead of cmtPtr
    )
  }

  io.stDataReadySqPtr := dataReadyPtrExt
  io.stIssuePtr := enqPtrExt(0)
  io.sqDeqPtr := deqPtrExt(0)

  /**
    * Writeback store from store units
    *
    * Most store instructions writeback to regfile in the previous cycle.
    * However,
    *   (1) For an mmio instruction with exceptions, we need to mark it as addrvalid
    *       (in this way it will trigger an exception when it reaches ROB's head)
    *       instead of pending to avoid sending them to lower level.
    *   (2) For an mmio instruction without exceptions, we mark it as pending.
    *       When the instruction reaches ROB's head, StoreQueue sends it to uncache channel.
    *       Upon receiving the response, StoreQueue writes back the instruction
    *       through arbiter with store units. It will later commit as normal.
    */
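
  // Timing sketch of the address channel (as read from the code below; not new logic):
  //   store_s1: io.storeAddrIn(i) fires -> paddr/vaddr modules are written and
  //             addrvalid is set (unless the TLB missed), uop is updated
  //   store_s2: io.storeAddrInRe(i), one cycle later -> pma/pmp results are known, so
  //             pending/mmio/atomic/hasException are updated and waitStoreS2 clears;
  //             a dcache miss reported here arms `prefetch` for commit time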

  // Write addr to sq
  for (i <- 0 until StorePipelineWidth) {
    paddrModule.io.wen(i) := false.B
    vaddrModule.io.wen(i) := false.B
    dataModule.io.mask.wen(i) := false.B
    val stWbIndex = io.storeAddrIn(i).bits.uop.sqIdx.value
    exceptionBuffer.io.storeAddrIn(i).valid := io.storeAddrIn(i).fire && !io.storeAddrIn(i).bits.miss && !io.storeAddrIn(i).bits.isvec
    exceptionBuffer.io.storeAddrIn(i).bits := io.storeAddrIn(i).bits
    // will re-enter exceptionBuffer at store_s2
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).valid := false.B
    exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).bits := 0.U.asTypeOf(new LsPipelineBundle)

    when (io.storeAddrIn(i).fire && io.storeAddrIn(i).bits.updateAddrValid) {
      val addr_valid = !io.storeAddrIn(i).bits.miss
      addrvalid(stWbIndex) := addr_valid //!io.storeAddrIn(i).bits.mmio
      nc(stWbIndex) := io.storeAddrIn(i).bits.nc

    }
    when (io.storeAddrIn(i).fire && !io.storeAddrIn(i).bits.isFrmMisAlignBuf) {
      // pending(stWbIndex) := io.storeAddrIn(i).bits.mmio
      unaligned(stWbIndex) := io.storeAddrIn(i).bits.isMisalign
      cross16Byte(stWbIndex) := io.storeAddrIn(i).bits.isMisalign && !io.storeAddrIn(i).bits.misalignWith16Byte

      paddrModule.io.waddr(i) := stWbIndex
      paddrModule.io.wdata(i) := io.storeAddrIn(i).bits.paddr
      paddrModule.io.wmask(i) := io.storeAddrIn(i).bits.mask
      paddrModule.io.wlineflag(i) := io.storeAddrIn(i).bits.wlineflag
      paddrModule.io.wen(i) := true.B

      vaddrModule.io.waddr(i) := stWbIndex
      vaddrModule.io.wdata(i) := io.storeAddrIn(i).bits.vaddr
      vaddrModule.io.wmask(i) := io.storeAddrIn(i).bits.mask
      vaddrModule.io.wlineflag(i) := io.storeAddrIn(i).bits.wlineflag
      vaddrModule.io.wen(i) := true.B

      debug_paddr(paddrModule.io.waddr(i)) := paddrModule.io.wdata(i)

      // mmio(stWbIndex) := io.storeAddrIn(i).bits.mmio

      XSInfo("store addr write to sq idx %d pc 0x%x miss:%d vaddr %x paddr %x mmio %x isvec %x\n",
        io.storeAddrIn(i).bits.uop.sqIdx.value,
        io.storeAddrIn(i).bits.uop.pc,
        io.storeAddrIn(i).bits.miss,
        io.storeAddrIn(i).bits.vaddr,
        io.storeAddrIn(i).bits.paddr,
        io.storeAddrIn(i).bits.mmio,
        io.storeAddrIn(i).bits.isvec
      )
    }
    when (io.storeAddrIn(i).fire) {
      uop(stWbIndex) := io.storeAddrIn(i).bits.uop
      uop(stWbIndex).debugInfo := io.storeAddrIn(i).bits.uop.debugInfo
    }

    // re-replenish mmio, as pma/pmp will deliver the mmio result one cycle later
    val storeAddrInFireReg = RegNext(io.storeAddrIn(i).fire && !io.storeAddrIn(i).bits.miss) && io.storeAddrInRe(i).updateAddrValid
    //val stWbIndexReg = RegNext(stWbIndex)
    val stWbIndexReg = RegEnable(stWbIndex, io.storeAddrIn(i).fire)
    when (storeAddrInFireReg) {
      pending(stWbIndexReg) := io.storeAddrInRe(i).mmio
      mmio(stWbIndexReg) := io.storeAddrInRe(i).mmio
      atomic(stWbIndexReg) := io.storeAddrInRe(i).atomic
      hasException(stWbIndexReg) := io.storeAddrInRe(i).hasException
      waitStoreS2(stWbIndexReg) := false.B
    }
    // dcache miss info (one cycle later than storeIn)
    // if dcache reports a miss in the sta pipeline, this store will trigger a prefetch when committing to sbuffer (if EnableAtCommitMissTrigger)
    when (storeAddrInFireReg) {
      prefetch(stWbIndexReg) := io.storeAddrInRe(i).miss
    }
    // enter exceptionBuffer again
    when (storeAddrInFireReg) {
      exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).valid := io.storeAddrInRe(i).hasException && !io.storeAddrInRe(i).isvec
      exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).bits := io.storeAddrInRe(i)
      exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).bits.uop.exceptionVec(storeAccessFault) := io.storeAddrInRe(i).af
    }

    when(vaddrModule.io.wen(i)){
      debug_vaddr(vaddrModule.io.waddr(i)) := vaddrModule.io.wdata(i)
    }
  }

  // Write data to sq
  // Now store data pipeline is actually 2 stages
  for (i <- 0 until StorePipelineWidth) {
    dataModule.io.data.wen(i) := false.B
    val stWbIndex = io.storeDataIn(i).bits.uop.sqIdx.value
    val isVec = FuType.isVStore(io.storeDataIn(i).bits.uop.fuType)
    // sq data write takes 2 cycles:
    // sq data write s0
    when (io.storeDataIn(i).fire) {
      // send data write req to data module
      dataModule.io.data.waddr(i) := stWbIndex
      dataModule.io.data.wdata(i) := Mux(io.storeDataIn(i).bits.uop.fuOpType === LSUOpType.cbo_zero,
        0.U,
        Mux(isVec,
          io.storeDataIn(i).bits.data,
          genVWdata(io.storeDataIn(i).bits.data, io.storeDataIn(i).bits.uop.fuOpType(2,0)))
      )
      dataModule.io.data.wen(i) := true.B

      debug_data(dataModule.io.data.waddr(i)) := dataModule.io.data.wdata(i)

      XSInfo("store data write to sq idx %d pc 0x%x data %x -> %x\n",
        io.storeDataIn(i).bits.uop.sqIdx.value,
        io.storeDataIn(i).bits.uop.pc,
        io.storeDataIn(i).bits.data,
        dataModule.io.data.wdata(i)
      )
    }
    // sq data write s1
    val lastStWbIndex = RegEnable(stWbIndex, io.storeDataIn(i).fire)
    when (
      RegNext(io.storeDataIn(i).fire) && allocated(lastStWbIndex)
      // && !RegNext(io.storeDataIn(i).bits.uop).robIdx.needFlush(io.brqRedirect)
    ) {
      datavalid(lastStWbIndex) := true.B
    }
  }

  // Write mask to sq
  for (i <- 0 until StorePipelineWidth) {
    // sq mask write s0
    when (io.storeMaskIn(i).fire) {
      // send mask write req to data module
      dataModule.io.mask.waddr(i) := io.storeMaskIn(i).bits.sqIdx.value
      dataModule.io.mask.wdata(i) := io.storeMaskIn(i).bits.mask
      dataModule.io.mask.wen(i) := true.B
    }
  }

  /**
    * load forward query
    *
    * Check store queue for instructions that are older than the load.
    * The response will be valid at the next cycle after req.
    */
  // check over all sq entries and forward data from the first matched store
  for (i <- 0 until LoadPipelineWidth) {
    // Compare deqPtr (the oldest sq entry) and forward.sqIdx; there are two cases:
    // (1) if they have the same flag, we need to check range(tail, sqIdx)
    // (2) if they have different flags, we need to check range(tail, StoreQueueSize) and range(0, sqIdx)
    // Forward1: Mux(same_flag, range(tail, sqIdx), range(tail, StoreQueueSize))
    // Forward2: Mux(same_flag, 0.U,                range(0, sqIdx)    )
    // i.e. forward1 covers the target entries with the same flag bits, and forward2 the rest
    val differentFlag = deqPtrExt(0).flag =/= io.forward(i).sqIdx.flag
    val forwardMask = io.forward(i).sqIdxMask
    // all addrvalid terms need to be checked
    // Really valid: all scalar stores, and vector stores with (!inactive && !secondInvalid)
    val addrRealValidVec = WireInit(VecInit((0 until StoreQueueSize).map(j => addrvalid(j) && allocated(j))))
    // vector store will consider all inactive || secondInvalid flows as valid
    val addrValidVec = WireInit(VecInit((0 until StoreQueueSize).map(j => addrvalid(j) && allocated(j))))
    val dataValidVec = WireInit(VecInit((0 until StoreQueueSize).map(j => datavalid(j))))
    val allValidVec = WireInit(VecInit((0 until StoreQueueSize).map(j => addrvalid(j) && datavalid(j) && allocated(j))))

    val lfstEnable = Constantin.createRecord("LFSTEnable", LFSTEnable)
    val storeSetHitVec = Mux(lfstEnable,
      WireInit(VecInit((0 until StoreQueueSize).map(j => io.forward(i).uop.loadWaitBit && uop(j).robIdx === io.forward(i).uop.waitForRobIdx))),
      WireInit(VecInit((0 until StoreQueueSize).map(j => uop(j).storeSetHit && uop(j).ssid === io.forward(i).uop.ssid)))
    )

    val forwardMask1 = Mux(differentFlag, ~deqMask, deqMask ^ forwardMask)
    val forwardMask2 = Mux(differentFlag, forwardMask, 0.U(StoreQueueSize.W))
    val canForward1 = forwardMask1 & allValidVec.asUInt
    val canForward2 = forwardMask2 & allValidVec.asUInt
    val needForward = Mux(differentFlag, ~deqMask | forwardMask, deqMask ^ forwardMask)

    XSDebug(p"$i f1 ${Binary(canForward1)} f2 ${Binary(canForward2)} " +
      p"sqIdx ${io.forward(i).sqIdx} pa ${Hexadecimal(io.forward(i).paddr)}\n"
    )

    // do real fwd query (cam lookup in load_s1)
    dataModule.io.needForward(i)(0) := canForward1 & vaddrModule.io.forwardMmask(i).asUInt
    dataModule.io.needForward(i)(1) := canForward2 & vaddrModule.io.forwardMmask(i).asUInt

    vaddrModule.io.forwardMdata(i) := io.forward(i).vaddr
    vaddrModule.io.forwardDataMask(i) := io.forward(i).mask
    paddrModule.io.forwardMdata(i) := io.forward(i).paddr
    paddrModule.io.forwardDataMask(i) := io.forward(i).mask

    // vaddr cam result does not equal paddr cam result
    // replay needed
    // val vpmaskNotEqual = ((paddrModule.io.forwardMmask(i).asUInt ^ vaddrModule.io.forwardMmask(i).asUInt) & needForward) =/= 0.U
    // val vaddrMatchFailed = vpmaskNotEqual && io.forward(i).valid
    val vpmaskNotEqual = (
      (RegEnable(paddrModule.io.forwardMmask(i).asUInt, io.forward(i).valid) ^ RegEnable(vaddrModule.io.forwardMmask(i).asUInt, io.forward(i).valid)) &
      RegNext(needForward) &
      GatedRegNext(addrRealValidVec.asUInt)
    ) =/= 0.U
    val vaddrMatchFailed = vpmaskNotEqual && RegNext(io.forward(i).valid)
    when (vaddrMatchFailed) {
      XSInfo("vaddrMatchFailed: pc %x pmask %x vmask %x\n",
        RegEnable(io.forward(i).uop.pc, io.forward(i).valid),
        RegEnable(needForward & paddrModule.io.forwardMmask(i).asUInt, io.forward(i).valid),
        RegEnable(needForward & vaddrModule.io.forwardMmask(i).asUInt, io.forward(i).valid)
      );
    }
    XSPerfAccumulate("vaddr_match_failed", vpmaskNotEqual)
    XSPerfAccumulate("vaddr_match_really_failed", vaddrMatchFailed)

    // Fast forward mask will be generated immediately (load_s1)
    io.forward(i).forwardMaskFast := dataModule.io.forwardMaskFast(i)

    // Forward result will be generated 1 cycle later (load_s2)
    io.forward(i).forwardMask := dataModule.io.forwardMask(i)
    io.forward(i).forwardData := dataModule.io.forwardData(i)

    // TODO: if an older store is unaligned, we currently just report dataInvalid and force
    // the load to replay. This is crude but guarantees correctness for now; a proper fix
    // would support forwarding from unaligned stores.
    // If addr match, data not ready, mark it as dataInvalid
    // load_s1: generate dataInvalid in load_s1 to set fastUop
    val dataInvalidMask1 = ((addrValidVec.asUInt & ~dataValidVec.asUInt & vaddrModule.io.forwardMmask(i).asUInt) | unaligned.asUInt & allocated.asUInt) & forwardMask1.asUInt
    val dataInvalidMask2 = ((addrValidVec.asUInt & ~dataValidVec.asUInt & vaddrModule.io.forwardMmask(i).asUInt) | unaligned.asUInt & allocated.asUInt) & forwardMask2.asUInt
    val dataInvalidMask = dataInvalidMask1 | dataInvalidMask2
    io.forward(i).dataInvalidFast := dataInvalidMask.orR

    // make chisel happy
    val dataInvalidMask1Reg = Wire(UInt(StoreQueueSize.W))
    dataInvalidMask1Reg := RegNext(dataInvalidMask1)
    // make chisel happy
    val dataInvalidMask2Reg = Wire(UInt(StoreQueueSize.W))
    dataInvalidMask2Reg := RegNext(dataInvalidMask2)
    val dataInvalidMaskReg = dataInvalidMask1Reg | dataInvalidMask2Reg

    // If SSID match, address not ready, mark it as addrInvalid
    // load_s2: generate addrInvalid
    val addrInvalidMask1 = (~addrValidVec.asUInt & storeSetHitVec.asUInt & forwardMask1.asUInt)
    val addrInvalidMask2 = (~addrValidVec.asUInt & storeSetHitVec.asUInt & forwardMask2.asUInt)
    // make chisel happy
    val addrInvalidMask1Reg = Wire(UInt(StoreQueueSize.W))
    addrInvalidMask1Reg := RegNext(addrInvalidMask1)
    // make chisel happy
    val addrInvalidMask2Reg = Wire(UInt(StoreQueueSize.W))
    addrInvalidMask2Reg := RegNext(addrInvalidMask2)
    val addrInvalidMaskReg = addrInvalidMask1Reg | addrInvalidMask2Reg

    // load_s2
    io.forward(i).dataInvalid := RegNext(io.forward(i).dataInvalidFast)
    // check if vaddr forward mismatched
    io.forward(i).matchInvalid := vaddrMatchFailed

    // data invalid sq index
    // check whether false fail
    // check flag
    val s2_differentFlag = RegNext(differentFlag)
    val s2_enqPtrExt = RegNext(enqPtrExt(0))
    val s2_deqPtrExt = RegNext(deqPtrExt(0))

    // addr invalid sq index
    // make chisel happy
    val addrInvalidMaskRegWire = Wire(UInt(StoreQueueSize.W))
    addrInvalidMaskRegWire := addrInvalidMaskReg
    val addrInvalidFlag = addrInvalidMaskRegWire.orR
    val hasInvalidAddr = (~addrValidVec.asUInt & needForward).orR

    val addrInvalidSqIdx1 = OHToUInt(Reverse(PriorityEncoderOH(Reverse(addrInvalidMask1Reg))))
    val addrInvalidSqIdx2 = OHToUInt(Reverse(PriorityEncoderOH(Reverse(addrInvalidMask2Reg))))
    val addrInvalidSqIdx = Mux(addrInvalidMask2Reg.orR, addrInvalidSqIdx2, addrInvalidSqIdx1)

    // store-set content management
    //                 +-----------------------+
    //                 | Search a SSID for the |
    //                 |    load operation     |
    //                 +-----------------------+
    //                            |
    //                            V
    //                  +-------------------+
    //                  | load wait strict? |
| 747 // +-------------------+ 748 // | 749 // V 750 // +----------------------+ 751 // Set| |Clean 752 // V V 753 // +------------------------+ +------------------------------+ 754 // | Waiting for all older | | Wait until the corresponding | 755 // | stores operations | | older store operations | 756 // +------------------------+ +------------------------------+ 757 758 759 760 when (RegEnable(io.forward(i).uop.loadWaitStrict, io.forward(i).valid)) { 761 io.forward(i).addrInvalidSqIdx := RegEnable((io.forward(i).uop.sqIdx - 1.U), io.forward(i).valid) 762 } .elsewhen (addrInvalidFlag) { 763 io.forward(i).addrInvalidSqIdx.flag := Mux(!s2_differentFlag || addrInvalidSqIdx >= s2_deqPtrExt.value, s2_deqPtrExt.flag, s2_enqPtrExt.flag) 764 io.forward(i).addrInvalidSqIdx.value := addrInvalidSqIdx 765 } .otherwise { 766 // may be store inst has been written to sbuffer already. 767 io.forward(i).addrInvalidSqIdx := RegEnable(io.forward(i).uop.sqIdx, io.forward(i).valid) 768 } 769 io.forward(i).addrInvalid := Mux(RegEnable(io.forward(i).uop.loadWaitStrict, io.forward(i).valid), RegNext(hasInvalidAddr), addrInvalidFlag) 770 771 // data invalid sq index 772 // make chisel happy 773 val dataInvalidMaskRegWire = Wire(UInt(StoreQueueSize.W)) 774 dataInvalidMaskRegWire := dataInvalidMaskReg 775 val dataInvalidFlag = dataInvalidMaskRegWire.orR 776 777 val dataInvalidSqIdx1 = OHToUInt(Reverse(PriorityEncoderOH(Reverse(dataInvalidMask1Reg)))) 778 val dataInvalidSqIdx2 = OHToUInt(Reverse(PriorityEncoderOH(Reverse(dataInvalidMask2Reg)))) 779 val dataInvalidSqIdx = Mux(dataInvalidMask2Reg.orR, dataInvalidSqIdx2, dataInvalidSqIdx1) 780 781 when (dataInvalidFlag) { 782 io.forward(i).dataInvalidSqIdx.flag := Mux(!s2_differentFlag || dataInvalidSqIdx >= s2_deqPtrExt.value, s2_deqPtrExt.flag, s2_enqPtrExt.flag) 783 io.forward(i).dataInvalidSqIdx.value := dataInvalidSqIdx 784 } .otherwise { 785 // may be store inst has been written to sbuffer already. 786 io.forward(i).dataInvalidSqIdx := RegEnable(io.forward(i).uop.sqIdx, io.forward(i).valid) 787 } 788 } 789 790 /** 791 * Memory mapped IO / other uncached operations / CMO 792 * 793 * States: 794 * (1) writeback from store units: mark as pending 795 * (2) when they reach ROB's head, they can be sent to uncache channel 796 * (3) response from uncache channel: mark as datavalidmask.wen 797 * (4) writeback to ROB (and other units): mark as writebacked 798 * (5) ROB commits the instruction: same as normal instructions 799 */ 800 //(2) when they reach ROB's head, they can be sent to uncache channel 801 // TODO: CAN NOT deal with vector mmio now! 
  val s_idle :: s_req :: s_resp :: s_wb :: s_wait :: Nil = Enum(5)
  val mmioState = RegInit(s_idle)
  val uncacheUop = Reg(new DynInst)
  val cboFlushedSb = RegInit(false.B)
  val cmoOpCode = uncacheUop.fuOpType(1, 0)
  val mmioDoReq = io.uncache.req.fire && !io.uncache.req.bits.nc
  val cboMmioPAddr = Reg(UInt(PAddrBits.W))
  switch(mmioState) {
    is(s_idle) {
      when(RegNext(io.rob.pendingst && uop(deqPtr).robIdx === io.rob.pendingPtr && pending(deqPtr) && allocated(deqPtr) && datavalid(deqPtr) && addrvalid(deqPtr) && !hasException(deqPtr))) {
        mmioState := s_req
        uncacheUop := uop(deqPtr)
        uncacheUop.exceptionVec := 0.U.asTypeOf(ExceptionVec())
        uncacheUop.trigger := 0.U.asTypeOf(TriggerAction())
        cboFlushedSb := false.B
        cboMmioPAddr := paddrModule.io.rdata(0)
      }
    }
    is(s_req) {
      when (mmioDoReq) {
        mmioState := s_resp
      }
    }
    is(s_resp) {
      when(io.uncache.resp.fire && !io.uncache.resp.bits.nc) {
        mmioState := s_wb

        when (io.uncache.resp.bits.nderr) {
          uncacheUop.exceptionVec(storeAccessFault) := true.B
        }
      }
    }
    is(s_wb) {
      when (io.mmioStout.fire || io.vecmmioStout.fire) {
        when (uncacheUop.exceptionVec(storeAccessFault)) {
          mmioState := s_idle
        }.otherwise {
          mmioState := s_wait
        }
      }
    }
    is(s_wait) {
      // An MMIO store can always move cmtPtrExt as it must be the ROB head
      when(scommit > 0.U) {
        mmioState := s_idle // ready for next mmio
      }
    }
  }

  mmioReq.valid := mmioState === s_req
  mmioReq.bits := DontCare
  mmioReq.bits.cmd := MemoryOpConstants.M_XWR
  mmioReq.bits.addr := paddrModule.io.rdata(0) // data(deqPtr) -> rdata(0)
  mmioReq.bits.vaddr:= vaddrModule.io.rdata(0)
  mmioReq.bits.data := shiftDataToLow(paddrModule.io.rdata(0), dataModule.io.rdata(0).data)
  mmioReq.bits.mask := shiftMaskToLow(paddrModule.io.rdata(0), dataModule.io.rdata(0).mask)
  mmioReq.bits.atomic := atomic(GatedRegNext(rdataPtrExtNext(0)).value)
  mmioReq.bits.nc := false.B
  mmioReq.bits.id := rdataPtrExt(0).value

  /**
    * NC Store
    * (1) req: once the store has been committed, it can be sent to the lower level.
    * (2) resp: because SQ data forwarding is required, the entry can only dequeue when ncResp is received
    */
  // TODO: CAN NOT deal with vector nc now!
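  // NC FSM sketch (matching the switch below): nc_idle -> nc_req ->
  // (uncacheOutstanding ? nc_idle : nc_resp) -> nc_idle. With outstanding mode the
  // actual dequeue trigger is RegNext(RegNext(ncDoReq)) because the ubuffer data write
  // takes 3 cycles (see ncDeqTrigger above).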
  val nc_idle :: nc_req :: nc_resp :: Nil = Enum(3)
  val ncState = RegInit(nc_idle)
  val rptr0 = rdataPtrExt(0).value
  switch(ncState){
    is(nc_idle) {
      when(nc(rptr0) && allocated(rptr0) && committed(rptr0) && !mmio(rptr0) && !isVec(rptr0)) {
        ncState := nc_req
      }
    }
    is(nc_req) {
      when(ncDoReq) {
        when(io.uncacheOutstanding) {
          ncState := nc_idle
        }.otherwise{
          ncState := nc_resp
        }
      }
    }
    is(nc_resp) {
      when(ncResp.fire) {
        ncState := nc_idle
      }
    }
  }

  ncDoReq := io.uncache.req.fire && io.uncache.req.bits.nc
  ncDoResp := ncResp.fire

  ncReq.valid := ncState === nc_req
  ncReq.bits := DontCare
  ncReq.bits.cmd := MemoryOpConstants.M_XWR
  ncReq.bits.addr := paddrModule.io.rdata(0)
  ncReq.bits.vaddr:= vaddrModule.io.rdata(0)
  ncReq.bits.data := shiftDataToLow(paddrModule.io.rdata(0), dataModule.io.rdata(0).data)
  ncReq.bits.mask := shiftMaskToLow(paddrModule.io.rdata(0), dataModule.io.rdata(0).mask)
  ncReq.bits.atomic := atomic(GatedRegNext(rdataPtrExtNext(0)).value)
  ncReq.bits.nc := true.B
  ncReq.bits.id := rptr0

  ncResp.ready := io.uncache.resp.ready
  ncResp.valid := io.uncache.resp.fire && io.uncache.resp.bits.nc
  ncResp.bits <> io.uncache.resp.bits
  when (ncDeqTrigger) {
    allocated(ncPtr) := false.B
    XSDebug("nc fire: ptr %d\n", ncPtr)
  }

  // mmio request has priority over nc request on the uncache channel
  mmioReq.ready := io.uncache.req.ready
  ncReq.ready := io.uncache.req.ready && !mmioReq.valid
  io.uncache.req.valid := mmioReq.valid || ncReq.valid
  io.uncache.req.bits := Mux(mmioReq.valid, mmioReq.bits, ncReq.bits)

  // CBO op type check can be delayed for 1 cycle,
  // as uncache op will not start in s_idle
  val cboMmioAddr = get_block_addr(cboMmioPAddr)
  val deqCanDoCbo = GatedRegNext(LSUOpType.isCbo(uop(deqPtr).fuOpType) && allocated(deqPtr) && addrvalid(deqPtr))
  when (deqCanDoCbo) {
    // disable uncache channel
    io.uncache.req.valid := false.B

    when (io.cmoOpReq.fire) {
      mmioState := s_resp
    }

    when (mmioState === s_resp) {
      when (io.cmoOpResp.fire) {
        mmioState := s_wb
      }
    }
  }

  io.cmoOpReq.valid := deqCanDoCbo && cboFlushedSb && (mmioState === s_req)
  io.cmoOpReq.bits.opcode := cmoOpCode
  io.cmoOpReq.bits.address := cboMmioAddr

  io.cmoOpResp.ready := deqCanDoCbo && (mmioState === s_resp)

  io.flushSbuffer.valid := deqCanDoCbo && !cboFlushedSb && (mmioState === s_req) && !io.flushSbuffer.empty

  when(deqCanDoCbo && !cboFlushedSb && (mmioState === s_req) && io.flushSbuffer.empty) {
    cboFlushedSb := true.B
  }

  when(mmioDoReq){
    // mmio store should not be committed until uncache req is sent
    pending(deqPtr) := false.B

    XSDebug(
      p"uncache mmio req: pc ${Hexadecimal(uop(deqPtr).pc)} " +
      p"addr ${Hexadecimal(io.uncache.req.bits.addr)} " +
      p"data ${Hexadecimal(io.uncache.req.bits.data)} " +
      p"op ${Hexadecimal(io.uncache.req.bits.cmd)} " +
      p"mask ${Hexadecimal(io.uncache.req.bits.mask)}\n"
    )
  }

  // (3) response from uncache channel: mark as datavalid
  io.uncache.resp.ready := true.B

  // (4) scalar store: writeback to ROB (and other units): mark as writebacked
  io.mmioStout.valid := mmioState === s_wb && !isVec(deqPtr)
  io.mmioStout.bits.uop := uncacheUop
  io.mmioStout.bits.uop.exceptionVec := ExceptionNO.selectByFu(uncacheUop.exceptionVec, StaCfg)
  io.mmioStout.bits.uop.sqIdx := deqPtrExt(0)
  io.mmioStout.bits.uop.flushPipe := deqCanDoCbo // flush pipeline to keep order in CMO
  io.mmioStout.bits.data := shiftDataToLow(paddrModule.io.rdata(0), dataModule.io.rdata(0).data) // dataModule.io.rdata.read(deqPtr)
  io.mmioStout.bits.isFromLoadUnit := DontCare
  io.mmioStout.bits.debug.isMMIO := true.B
  io.mmioStout.bits.debug.isNC := false.B
  io.mmioStout.bits.debug.paddr := DontCare
  io.mmioStout.bits.debug.isPerfCnt := false.B
  io.mmioStout.bits.debug.vaddr := DontCare
  // Remove MMIO inst from store queue after MMIO request is being sent
  // That inst will be traced by uncache state machine
  when (io.mmioStout.fire) {
    allocated(deqPtr) := false.B
  }

  exceptionBuffer.io.storeAddrIn.last.valid := io.mmioStout.fire
  exceptionBuffer.io.storeAddrIn.last.bits := DontCare
  exceptionBuffer.io.storeAddrIn.last.bits.fullva := vaddrModule.io.rdata.head
  exceptionBuffer.io.storeAddrIn.last.bits.vaNeedExt := true.B
  exceptionBuffer.io.storeAddrIn.last.bits.uop := uncacheUop

  // (4) or vector store:
  // TODO: implement it!
  io.vecmmioStout := DontCare
  io.vecmmioStout.valid := false.B //mmioState === s_wb && isVec(deqPtr)
  io.vecmmioStout.bits.uop := uop(deqPtr)
  io.vecmmioStout.bits.uop.sqIdx := deqPtrExt(0)
  io.vecmmioStout.bits.data := shiftDataToLow(paddrModule.io.rdata(0), dataModule.io.rdata(0).data) // dataModule.io.rdata.read(deqPtr)
  io.vecmmioStout.bits.debug.isMMIO := true.B
  io.vecmmioStout.bits.debug.isNC := false.B
  io.vecmmioStout.bits.debug.paddr := DontCare
  io.vecmmioStout.bits.debug.isPerfCnt := false.B
  io.vecmmioStout.bits.debug.vaddr := DontCare
  // Remove MMIO inst from store queue after MMIO request is being sent
  // That inst will be traced by uncache state machine
  when (io.vecmmioStout.fire) {
    allocated(deqPtr) := false.B
  }
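
  // Commit handshake sketch (restating the logic below, not new hardware): the ROB
  // reports how many stores commit via io.rob.scommit (registered into `scommit`);
  // the SQ marks entries `committed` in order starting at cmtPtr, up to CommitWidth
  // per cycle, and advances cmtPtrExt by PopCount(commitVec).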

  /**
    * ROB commits store instructions (mark them as committed)
    *
    * (1) When a store commits, mark it as committed.
    * (2) Committed stores will not be cancelled and can be sent to lower level.
    */
  XSError(mmioState =/= s_idle && mmioState =/= s_wait && commitCount > 0.U,
    "should not commit instruction when MMIO has not been finished\n")

  val commitVec = WireInit(VecInit(Seq.fill(CommitWidth)(false.B)))
  val needCancel = Wire(Vec(StoreQueueSize, Bool())) // Will be assigned later

  if (backendParams.debugEn){ dontTouch(commitVec) }

  // TODO: Deal with vector store mmio
  for (i <- 0 until CommitWidth) {
    // don't mark a misaligned store as committed
    when (
      allocated(cmtPtrExt(i).value) &&
      isNotAfter(uop(cmtPtrExt(i).value).robIdx, GatedRegNext(io.rob.pendingPtr)) &&
      !needCancel(cmtPtrExt(i).value) &&
      (!waitStoreS2(cmtPtrExt(i).value) || isVec(cmtPtrExt(i).value))) {
      if (i == 0){
        // TODO: fixme for vector mmio
        when ((mmioState === s_idle) || (mmioState === s_wait && scommit > 0.U)){
          when ((isVec(cmtPtrExt(i).value) && vecMbCommit(cmtPtrExt(i).value)) || !isVec(cmtPtrExt(i).value)) {
            committed(cmtPtrExt(0).value) := true.B
            commitVec(0) := true.B
          }
        }
      } else {
        when ((isVec(cmtPtrExt(i).value) && vecMbCommit(cmtPtrExt(i).value)) || !isVec(cmtPtrExt(i).value)) {
          committed(cmtPtrExt(i).value) := commitVec(i - 1) || committed(cmtPtrExt(i).value)
          commitVec(i) := commitVec(i - 1)
        }
      }
    }
  }

  commitCount := PopCount(commitVec)
  cmtPtrExt := cmtPtrExt.map(_ + commitCount)

  /**
    * committed stores will not be cancelled and can be sent to lower level.
    *
    * 1. Store NC: Read data to uncache
    *    implemented as above
    *
    * 2. Store Cache: Read data from data module
    *    remove retired insts from sq, add retired store to sbuffer.
    *    as store queue grows larger and larger, time needed to read data from data
    *    module keeps growing higher. Now we give data read a whole cycle.
    */

  // TODO: an unaligned store can only be sent out if the dataBuffer can accept two entries;
  // for now, the number of dataBuffer enqueue ports is hardcoded.
  val canDeqMisaligned = dataBuffer.io.enq(0).ready && dataBuffer.io.enq(1).ready
  val firstWithMisalign = unaligned(rdataPtrExt(0).value)
  val firstWithCross16Byte = cross16Byte(rdataPtrExt(0).value)

  val isCross4KPage = io.maControl.toStoreQueue.crossPageWithHit
  val isCross4KPageCanDeq = io.maControl.toStoreQueue.crossPageCanDeq
  // When encountering a cross-page store, a request needs to be sent to storeMisalignBuffer for the high page's paddr.
  io.maControl.toStoreMisalignBuffer.sqPtr := rdataPtrExt(0)
  io.maControl.toStoreMisalignBuffer.doDeq := isCross4KPage && isCross4KPageCanDeq && dataBuffer.io.enq(0).fire
  io.maControl.toStoreMisalignBuffer.uop := uop(rdataPtrExt(0).value)
  for (i <- 0 until EnsbufferWidth) {
    val ptr = rdataPtrExt(i).value
    val mmioStall = if(i == 0) mmio(rdataPtrExt(0).value) else (mmio(rdataPtrExt(i).value) || mmio(rdataPtrExt(i-1).value))
    val ncStall = if(i == 0) nc(rdataPtrExt(0).value) else (nc(rdataPtrExt(i).value) || nc(rdataPtrExt(i-1).value))
    val exceptionValid = if(i == 0) hasException(rdataPtrExt(0).value) else {
      hasException(rdataPtrExt(i).value) || (hasException(rdataPtrExt(i-1).value) && uop(rdataPtrExt(i).value).robIdx === uop(rdataPtrExt(i-1).value).robIdx)
    }
    val vecNotAllMask = dataModule.io.rdata(i).mask.orR
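    // Dequeue gating summary (restating the vals above): port i stalls behind any
    // mmio/nc store at or before its read pointer, and exceptionValid also covers an
    // exception on the previous port when both flows belong to the same robIdx, so
    // none of a faulting store's flows are written to the sbuffer.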
    // Vector instructions that triggered exceptions are prevented from being written to the dataBuffer.
    val vecHasExceptionFlagValid = vecExceptionFlag.valid && isVec(ptr) && vecExceptionFlag.bits.robIdx === uop(ptr).robIdx

    // Only the first port can dequeue an unaligned store.
    // Simplified design: even if both ports have exceptions, only one unaligned store dequeues.
    val assert_flag = WireInit(false.B)
    when(firstWithMisalign && firstWithCross16Byte) {
      dataBuffer.io.enq(0).valid := canDeqMisaligned && allocated(rdataPtrExt(0).value) && committed(rdataPtrExt(0).value) &&
        ((!isVec(rdataPtrExt(0).value) && allvalid(rdataPtrExt(0).value) || vecMbCommit(rdataPtrExt(0).value)) &&
        (!isCross4KPage || isCross4KPageCanDeq) || hasException(rdataPtrExt(0).value)) && !ncStall

      dataBuffer.io.enq(1).valid := canDeqMisaligned && allocated(rdataPtrExt(0).value) && committed(rdataPtrExt(0).value) &&
        (!isVec(rdataPtrExt(0).value) && allvalid(rdataPtrExt(0).value) || vecMbCommit(rdataPtrExt(0).value)) &&
        (!isCross4KPage || isCross4KPageCanDeq) && !hasException(rdataPtrExt(0).value) && !ncStall
      assert_flag := dataBuffer.io.enq(1).valid
    }.otherwise {
      if (i == 0) {
        dataBuffer.io.enq(i).valid := (
          allocated(ptr) && committed(ptr)
            && ((!isVec(ptr) && (allvalid(ptr) || hasException(ptr))) || vecMbCommit(ptr))
            && !mmioStall && !ncStall
            && (!unaligned(ptr) || !cross16Byte(ptr) && (allvalid(ptr) || hasException(ptr)))
        )
      }
      else {
        dataBuffer.io.enq(i).valid := (
          allocated(ptr) && committed(ptr)
            && ((!isVec(ptr) && (allvalid(ptr) || hasException(ptr))) || vecMbCommit(ptr))
            && !mmioStall && !ncStall
            && (!unaligned(ptr) || !cross16Byte(ptr) && (allvalid(ptr) || hasException(ptr)))
        )
      }
    }

    val misalignAddrLow = vaddrModule.io.rdata(0)(2, 0)
    val cross16ByteAddrLow4bit = vaddrModule.io.rdata(0)(3, 0)
    val addrLow4bit = vaddrModule.io.rdata(i)(3, 0)

    // For unaligned stores we generate a base-aligned mask in the store unit and then do a shift split in StoreQueue.
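    // Worked example (illustrative numbers only): an 8-byte store whose low 4 address
    // bits are 0xC has mask 0x00FF within its 16-byte frame. Shifting by 0xC gives the
    // 32-bit Cross16ByteMask 0x000F_F000: maskLow = 0xF000 (bytes 12-15 of the low
    // 16-byte line) and maskHigh = 0x000F (bytes 0-3 of the high line). The 256-bit
    // data is shifted by 0xC * 8 bits and split the same way below.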
    val Cross16ByteMask = Wire(UInt(32.W))
    val Cross16ByteData = Wire(UInt(256.W))
    Cross16ByteMask := dataModule.io.rdata(0).mask << cross16ByteAddrLow4bit
    Cross16ByteData := dataModule.io.rdata(0).data << (cross16ByteAddrLow4bit << 3)

    val paddrLow = Cat(paddrModule.io.rdata(0)(paddrModule.io.rdata(0).getWidth - 1, 3), 0.U(3.W))
    val paddrHigh = Cat(paddrModule.io.rdata(0)(paddrModule.io.rdata(0).getWidth - 1, 3), 0.U(3.W)) + 8.U

    val vaddrLow = Cat(vaddrModule.io.rdata(0)(vaddrModule.io.rdata(0).getWidth - 1, 3), 0.U(3.W))
    val vaddrHigh = Cat(vaddrModule.io.rdata(0)(vaddrModule.io.rdata(0).getWidth - 1, 3), 0.U(3.W)) + 8.U

    val maskLow = Cross16ByteMask(15, 0)
    val maskHigh = Cross16ByteMask(31, 16)

    val dataLow = Cross16ByteData(127, 0)
    val dataHigh = Cross16ByteData(255, 128)

    val toSbufferVecValid = (!isVec(ptr) || (vecMbCommit(ptr) && allvalid(ptr) && vecNotAllMask)) && !exceptionValid && !vecHasExceptionFlagValid
    when(canDeqMisaligned && firstWithMisalign && firstWithCross16Byte) {
      when(isCross4KPage && isCross4KPageCanDeq) {
        if (i == 0) {
          dataBuffer.io.enq(i).bits.addr := paddrLow
          dataBuffer.io.enq(i).bits.vaddr := vaddrLow
          dataBuffer.io.enq(i).bits.data := dataLow
          dataBuffer.io.enq(i).bits.mask := maskLow
          dataBuffer.io.enq(i).bits.wline := false.B
          dataBuffer.io.enq(i).bits.sqPtr := rdataPtrExt(0)
          dataBuffer.io.enq(i).bits.prefetch := false.B
          dataBuffer.io.enq(i).bits.sqNeedDeq := true.B
          dataBuffer.io.enq(i).bits.vecValid := toSbufferVecValid
        }
        else {
          dataBuffer.io.enq(i).bits.addr := io.maControl.toStoreQueue.paddr
          dataBuffer.io.enq(i).bits.vaddr := vaddrHigh
          dataBuffer.io.enq(i).bits.data := dataHigh
          dataBuffer.io.enq(i).bits.mask := maskHigh
          dataBuffer.io.enq(i).bits.wline := false.B
          dataBuffer.io.enq(i).bits.sqPtr := rdataPtrExt(0)
          dataBuffer.io.enq(i).bits.prefetch := false.B
          dataBuffer.io.enq(i).bits.sqNeedDeq := false.B
          dataBuffer.io.enq(i).bits.vecValid := dataBuffer.io.enq(0).bits.vecValid
        }
      } .otherwise {
        if (i == 0) {
          dataBuffer.io.enq(i).bits.addr := paddrLow
          dataBuffer.io.enq(i).bits.vaddr := vaddrLow
          dataBuffer.io.enq(i).bits.data := dataLow
          dataBuffer.io.enq(i).bits.mask := maskLow
          dataBuffer.io.enq(i).bits.wline := false.B
          dataBuffer.io.enq(i).bits.sqPtr := rdataPtrExt(0)
          dataBuffer.io.enq(i).bits.prefetch := false.B
          dataBuffer.io.enq(i).bits.sqNeedDeq := true.B
          dataBuffer.io.enq(i).bits.vecValid := toSbufferVecValid
        }
        else {
          dataBuffer.io.enq(i).bits.addr := paddrHigh
          dataBuffer.io.enq(i).bits.vaddr := vaddrHigh
          dataBuffer.io.enq(i).bits.data := dataHigh
          dataBuffer.io.enq(i).bits.mask := maskHigh
          dataBuffer.io.enq(i).bits.wline := false.B
          dataBuffer.io.enq(i).bits.sqPtr := rdataPtrExt(0)
          dataBuffer.io.enq(i).bits.prefetch := false.B
          dataBuffer.io.enq(i).bits.sqNeedDeq := false.B
          dataBuffer.io.enq(i).bits.vecValid := dataBuffer.io.enq(0).bits.vecValid
        }
      }

    }.elsewhen(!cross16Byte(ptr) && unaligned(ptr)) {
      dataBuffer.io.enq(i).bits.addr := Cat(paddrModule.io.rdata(i)(PAddrBits - 1, 4), 0.U(4.W))
      dataBuffer.io.enq(i).bits.vaddr := Cat(vaddrModule.io.rdata(i)(VAddrBits - 1, 4), 0.U(4.W))
      dataBuffer.io.enq(i).bits.data := dataModule.io.rdata(i).data << (addrLow4bit << 3)
      dataBuffer.io.enq(i).bits.mask := dataModule.io.rdata(i).mask
      dataBuffer.io.enq(i).bits.wline := paddrModule.io.rlineflag(i)
      dataBuffer.io.enq(i).bits.sqPtr := rdataPtrExt(i)
      dataBuffer.io.enq(i).bits.prefetch := prefetch(ptr)
      dataBuffer.io.enq(i).bits.sqNeedDeq := true.B
      // when a scalar store has an exception, it will also not be written into the sbuffer
      dataBuffer.io.enq(i).bits.vecValid := toSbufferVecValid
    }.otherwise {
      dataBuffer.io.enq(i).bits.addr := paddrModule.io.rdata(i)
      dataBuffer.io.enq(i).bits.vaddr := vaddrModule.io.rdata(i)
      dataBuffer.io.enq(i).bits.data := dataModule.io.rdata(i).data
      dataBuffer.io.enq(i).bits.mask := dataModule.io.rdata(i).mask
      dataBuffer.io.enq(i).bits.wline := paddrModule.io.rlineflag(i)
      dataBuffer.io.enq(i).bits.sqPtr := rdataPtrExt(i)
      dataBuffer.io.enq(i).bits.prefetch := prefetch(ptr)
      dataBuffer.io.enq(i).bits.sqNeedDeq := true.B
      // when a scalar store has an exception, it will also not be written into the sbuffer
      dataBuffer.io.enq(i).bits.vecValid := toSbufferVecValid
    }

    // Note that store data/addr should both be valid after store's commit
    assert(!dataBuffer.io.enq(i).valid || allvalid(ptr) || hasException(ptr) || (allocated(ptr) && vecMbCommit(ptr)) || assert_flag)
  }

  // Send data stored in dataBuffer to sbuffer
  for (i <- 0 until EnsbufferWidth) {
    io.sbuffer(i).valid := dataBuffer.io.deq(i).valid
    dataBuffer.io.deq(i).ready := io.sbuffer(i).ready
    io.sbuffer(i).bits := DontCare
    io.sbuffer(i).bits.cmd := MemoryOpConstants.M_XWR
    io.sbuffer(i).bits.addr := dataBuffer.io.deq(i).bits.addr
    io.sbuffer(i).bits.vaddr := dataBuffer.io.deq(i).bits.vaddr
    io.sbuffer(i).bits.data := dataBuffer.io.deq(i).bits.data
    io.sbuffer(i).bits.mask := dataBuffer.io.deq(i).bits.mask
    io.sbuffer(i).bits.wline := dataBuffer.io.deq(i).bits.wline && dataBuffer.io.deq(i).bits.vecValid
    io.sbuffer(i).bits.prefetch := dataBuffer.io.deq(i).bits.prefetch
    io.sbuffer(i).bits.vecValid := dataBuffer.io.deq(i).bits.vecValid
    io.sbuffer(i).bits.sqNeedDeq := dataBuffer.io.deq(i).bits.sqNeedDeq
    // io.sbuffer(i).fire is RegNexted, as sbuffer data write takes 2 cycles.
    // Before the data write finishes, the sbuffer is unable to provide store-to-load
    // forward data. As a workaround, the deqPtrExt and allocated flag update
    // is delayed so that a load can get the right data from the store queue.
    val ptr = dataBuffer.io.deq(i).bits.sqPtr.value
    when (RegNext(io.sbuffer(i).fire && io.sbuffer(i).bits.sqNeedDeq)) {
      allocated(RegEnable(ptr, io.sbuffer(i).fire)) := false.B
      XSDebug("sbuffer "+i+" fire: ptr %d\n", ptr)
    }
  }
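
  // vecExceptionFlag life cycle (a sketch of the logic below): the flag is set when a
  // flow with an exception dequeues and it is not the instruction's last flow; it is
  // cleared when the last flow of that robIdx dequeues. While set, younger flows of
  // the same robIdx are blocked from writing the sbuffer (vecHasExceptionFlagValid).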
  val vecCommitHasException = (0 until EnsbufferWidth).map { i =>
    val ptr = rdataPtrExt(i).value
    val mmioStall = if (i == 0) mmio(rdataPtrExt(0).value) else (mmio(rdataPtrExt(i).value) || mmio(rdataPtrExt(i - 1).value))
    val ncStall = if (i == 0) nc(rdataPtrExt(0).value) else (nc(rdataPtrExt(i).value) || nc(rdataPtrExt(i - 1).value))
    val exceptionValid = isVec(ptr) && hasException(ptr) && dataBuffer.io.enq(i).fire
    (exceptionValid, uop(ptr), vecLastFlow(ptr))
  }

  val vecCommitHasExceptionValid = vecCommitHasException.map(_._1)
  val vecCommitHasExceptionUop = vecCommitHasException.map(_._2)
  val vecCommitHasExceptionLastFlow = vecCommitHasException.map(_._3)
  val vecCommitHasExceptionValidOR = vecCommitHasExceptionValid.reduce(_ || _)
  // Select the last uop that has an exception.
  val vecCommitHasExceptionSelectUop = ParallelPosteriorityMux(vecCommitHasExceptionValid, vecCommitHasExceptionUop)
  // If the last flow with an exception is the LastFlow of this instruction, the flag is not set.
  // Compare robIdx to select the last flow.
  require(EnsbufferWidth == 2, "The vector store exception handling process only supports EnsbufferWidth == 2 for now.")
  val robidxEQ = dataBuffer.io.enq(0).fire && dataBuffer.io.enq(1).fire &&
    uop(rdataPtrExt(0).value).robIdx === uop(rdataPtrExt(1).value).robIdx
  val robidxNE = dataBuffer.io.enq(0).fire && dataBuffer.io.enq(1).fire && (
    uop(rdataPtrExt(0).value).robIdx =/= uop(rdataPtrExt(1).value).robIdx
  )
  val onlyCommit0 = dataBuffer.io.enq(0).fire && !dataBuffer.io.enq(1).fire

  val vecCommitLastFlow =
    // robIdx equal => check whether port 1 carries the last flow
    robidxEQ && vecCommitHasExceptionLastFlow(1) ||
    // robIdx not equal => port 0 must be the last flow; only check port 1 when it has an exception
    robidxNE && (vecCommitHasExceptionValid(1) && vecCommitHasExceptionLastFlow(1) || !vecCommitHasExceptionValid(1)) ||
    onlyCommit0 && vecCommitHasExceptionLastFlow(0)

  val vecExceptionFlagCancel = (0 until EnsbufferWidth).map { i =>
    val ptr = rdataPtrExt(i).value
    val vecLastFlowCommit = vecLastFlow(ptr) && (uop(ptr).robIdx === vecExceptionFlag.bits.robIdx) && dataBuffer.io.enq(i).fire
    vecLastFlowCommit
  }.reduce(_ || _)

  // When the LastFlow of the excepting instruction is committed, clear the flag.
  when(!vecExceptionFlag.valid && vecCommitHasExceptionValidOR && !vecCommitLastFlow) {
    vecExceptionFlag.valid := true.B
    vecExceptionFlag.bits := vecCommitHasExceptionSelectUop
  }.elsewhen(vecExceptionFlag.valid && vecExceptionFlagCancel) {
    vecExceptionFlag.valid := false.B
    vecExceptionFlag.bits := 0.U.asTypeOf(new DynInst)
  }

  // Defensive code only: the flag should never stay set for a long period of time.
  // The timeout value is relatively large and has no special meaning.
  // If the assertion fires and you confirm it is not a bug, increase the timeout or remove the assertion.
  TimeOutAssert(vecExceptionFlag.valid, 3000, "vecExceptionFlag timeout, please check for bugs or increase the timeout.")

  // Initialize when difftest is disabled.
  for (i <- 0 until EnsbufferWidth) {
    io.sbufferVecDifftestInfo(i) := DontCare
  }
  // Consistent with the logic above:
  // only the signals required by the vector store difftest are separated from the RTL code.
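  // Explanatory note (not from the original sources): difftestBuffer shadows
  // dataBuffer with the same enq/deq handshakes, so the uops handed to
  // io.sbufferVecDifftestInfo leave in exactly the order their data enters the
  // sbuffer, letting the co-simulation commit stores in sync with the RTL.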
  if (env.EnableDifftest) {
    for (i <- 0 until EnsbufferWidth) {
      val ptr = dataBuffer.io.enq(i).bits.sqPtr.value
      difftestBuffer.get.io.enq(i).valid := dataBuffer.io.enq(i).valid
      difftestBuffer.get.io.enq(i).bits := uop(ptr)
    }
    for (i <- 0 until EnsbufferWidth) {
      io.sbufferVecDifftestInfo(i).valid := difftestBuffer.get.io.deq(i).valid
      difftestBuffer.get.io.deq(i).ready := io.sbufferVecDifftestInfo(i).ready

      io.sbufferVecDifftestInfo(i).bits := difftestBuffer.get.io.deq(i).bits
    }

    // commit cbo.inval to difftest
    val cmoInvalEvent = DifftestModule(new DiffCMOInvalEvent)
    cmoInvalEvent.coreid := io.hartId
    cmoInvalEvent.valid := io.mmioStout.fire && deqCanDoCbo && LSUOpType.isCboInval(uop(deqPtr).fuOpType)
    cmoInvalEvent.addr := cboMmioAddr
  }

  (1 until EnsbufferWidth).foreach(i => when(io.sbuffer(i).fire) { assert(io.sbuffer(i - 1).fire) })
  if (coreParams.dcacheParametersOpt.isEmpty) {
    for (i <- 0 until EnsbufferWidth) {
      val ptr = deqPtrExt(i).value
      val ram = DifftestMem(64L * 1024 * 1024 * 1024, 8)
      val wen = allocated(ptr) && committed(ptr) && !mmio(ptr)
      val waddr = ((paddrModule.io.rdata(i) - "h80000000".U) >> 3).asUInt
      val wdata = Mux(paddrModule.io.rdata(i)(3), dataModule.io.rdata(i).data(127, 64), dataModule.io.rdata(i).data(63, 0))
      val wmask = Mux(paddrModule.io.rdata(i)(3), dataModule.io.rdata(i).mask(15, 8), dataModule.io.rdata(i).mask(7, 0))
      when (wen) {
        ram.write(waddr, wdata.asTypeOf(Vec(8, UInt(8.W))), wmask.asBools)
      }
    }
  }

  // Read vaddr for mem exception
  io.exceptionAddr.vaddr := exceptionBuffer.io.exceptionAddr.vaddr
  io.exceptionAddr.vaNeedExt := exceptionBuffer.io.exceptionAddr.vaNeedExt
  io.exceptionAddr.isHyper := exceptionBuffer.io.exceptionAddr.isHyper
  io.exceptionAddr.gpaddr := exceptionBuffer.io.exceptionAddr.gpaddr
  io.exceptionAddr.vstart := exceptionBuffer.io.exceptionAddr.vstart
  io.exceptionAddr.vl := exceptionBuffer.io.exceptionAddr.vl
  io.exceptionAddr.isForVSnonLeafPTE := exceptionBuffer.io.exceptionAddr.isForVSnonLeafPTE

  // vector commit or replay, driven by feedback from the vector store pipeline
  val vecCommittmp = Wire(Vec(StoreQueueSize, Vec(VecStorePipelineWidth, Bool())))
  val vecCommit = Wire(Vec(StoreQueueSize, Bool()))
  for (i <- 0 until StoreQueueSize) {
    val fbk = io.vecFeedback
    for (j <- 0 until VecStorePipelineWidth) {
      vecCommittmp(i)(j) := fbk(j).valid && (fbk(j).bits.isCommit || fbk(j).bits.isFlush) &&
        uop(i).robIdx === fbk(j).bits.robidx && uop(i).uopIdx === fbk(j).bits.uopidx && allocated(i)
    }
    vecCommit(i) := vecCommittmp(i).reduce(_ || _)

    when (vecCommit(i)) {
      vecMbCommit(i) := true.B
    }
  }

  // For vector stores, when a page-crossing store shares the same uop with the one held in the
  // storeMisalignBuffer, the store queue needs to mark this entry as committed.
  // TODO FIXME Can vecMbCommit be removed?
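  // Explanatory note (not from the original sources): presumably the
  // vecFeedback path above never covers the half of a page-crossing misaligned
  // store owned by the storeMisalignBuffer, so when the buffer reports the same
  // uop (withSameUop) the head entry is marked vecMbCommit directly, allowing
  // it to pass the vecMbCommit check in toSbufferVecValid and dequeue.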
  when(io.maControl.toStoreQueue.withSameUop && allvalid(rdataPtrExt(0).value)) {
    vecMbCommit(rdataPtrExt(0).value) := true.B
  }

  // misprediction recovery / exception redirect
  // invalidate sq entries using robIdx
  for (i <- 0 until StoreQueueSize) {
    needCancel(i) := uop(i).robIdx.needFlush(io.brqRedirect) && allocated(i) && !committed(i) &&
      (!isVec(i) || !(uop(i).robIdx === io.brqRedirect.bits.robIdx))
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
   * update pointers
   */
  val enqCancelValid = canEnqueue.zip(io.enq.req).map { case (v, x) =>
    v && x.bits.robIdx.needFlush(io.brqRedirect)
  }
  val enqCancelNum = enqCancelValid.zip(io.enq.req).map { case (v, req) =>
    Mux(v, req.bits.numLsElem, 0.U)
  }
  val lastEnqCancel = RegEnable(enqCancelNum.reduce(_ + _), io.brqRedirect.valid) // 1 cycle after redirect

  val lastCycleCancelCount = PopCount(RegEnable(needCancel, io.brqRedirect.valid)) // 1 cycle after redirect
  val lastCycleRedirect = RegNext(io.brqRedirect.valid) // 1 cycle after redirect
  val enqNumber = validVStoreFlow.reduce(_ + _)

  val lastlastCycleRedirect = RegNext(lastCycleRedirect) // 2 cycles after redirect
  val redirectCancelCount = RegEnable(lastCycleCancelCount + lastEnqCancel, 0.U, lastCycleRedirect) // 2 cycles after redirect

  when (lastlastCycleRedirect) {
    // we recover the pointers 2 cycles after redirect for better timing
    enqPtrExt := VecInit(enqPtrExt.map(_ - redirectCancelCount))
  }.otherwise {
    // lastCycleRedirect.valid or the normal case
    // when lastCycleRedirect.valid, enqNumber === 0.U, so enqPtrExt will not change
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }
  assert(!(lastCycleRedirect && enqNumber =/= 0.U))

  deqPtrExt := deqPtrExtNext
  rdataPtrExt := rdataPtrExtNext

  // val dequeueCount = Mux(io.sbuffer(1).fire, 2.U, Mux(io.sbuffer(0).fire || io.mmioStout.fire, 1.U, 0.U))

  // If the redirect arrives at T0, sqCancelCnt is available at T2
  io.sqCancelCnt := redirectCancelCount
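  // Worked example (explanatory note, not from the original sources): a redirect
  // at T0 that flushes 3 already-allocated entries (PopCount(needCancel) = 3)
  // and cancels an in-flight enqueue request of 2 flows (lastEnqCancel = 2)
  // yields redirectCancelCount = 5 at T2, so every enqPtrExt lane is rolled
  // back by 5 in the same cycle that io.sqCancelCnt reports 5 upstream.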
  val ForceWriteUpper = Wire(UInt(log2Up(StoreQueueSize + 1).W))
  ForceWriteUpper := Constantin.createRecord(s"ForceWriteUpper_${p(XSCoreParamsKey).HartId}", initValue = 60)
  val ForceWriteLower = Wire(UInt(log2Up(StoreQueueSize + 1).W))
  ForceWriteLower := Constantin.createRecord(s"ForceWriteLower_${p(XSCoreParamsKey).HartId}", initValue = 55)

  val valid_cnt = PopCount(allocated)
  io.force_write := RegNext(Mux(valid_cnt >= ForceWriteUpper, true.B, valid_cnt >= ForceWriteLower && io.force_write), init = false.B)

  // io.sqempty will be used by the sbuffer
  // We delay it by 1 cycle for better timing
  // When the sbuffer needs to check whether it is empty, the pipeline is blocked, which means
  // delaying io.sqempty by 1 cycle still guarantees that sq is empty in that cycle
  io.sqEmpty := RegNext(
    enqPtrExt(0).value === deqPtrExt(0).value &&
    enqPtrExt(0).flag === deqPtrExt(0).flag
  )
  // perf counters
  QueuePerf(StoreQueueSize, validCount, !allowEnqueue)
  val vecValidVec = WireInit(VecInit((0 until StoreQueueSize).map(i => allocated(i) && isVec(i))))
  QueuePerf(StoreQueueSize, PopCount(vecValidVec), !allowEnqueue)
  io.sqFull := !allowEnqueue
  XSPerfAccumulate("mmioCycle", mmioState =/= s_idle) // sq is busy dealing with an uncache req
  XSPerfAccumulate("mmioCnt", mmioDoReq)
  XSPerfAccumulate("mmio_wb_success", io.mmioStout.fire || io.vecmmioStout.fire)
  XSPerfAccumulate("mmio_wb_blocked", (io.mmioStout.valid && !io.mmioStout.ready) || (io.vecmmioStout.valid && !io.vecmmioStout.ready))
  XSPerfAccumulate("validEntryCnt", distanceBetween(enqPtrExt(0), deqPtrExt(0)))
  XSPerfAccumulate("cmtEntryCnt", distanceBetween(cmtPtrExt(0), deqPtrExt(0)))
  XSPerfAccumulate("nCmtEntryCnt", distanceBetween(enqPtrExt(0), cmtPtrExt(0)))

  val perfValidCount = distanceBetween(enqPtrExt(0), deqPtrExt(0))
  val perfEvents = Seq(
    ("mmioCycle      ", mmioState =/= s_idle),
    ("mmioCnt        ", mmioDoReq),
    ("mmio_wb_success", io.mmioStout.fire || io.vecmmioStout.fire),
    ("mmio_wb_blocked", (io.mmioStout.valid && !io.mmioStout.ready) || (io.vecmmioStout.valid && !io.vecmmioStout.ready)),
    ("stq_1_4_valid  ", (perfValidCount < (StoreQueueSize.U / 4.U))),
    ("stq_2_4_valid  ", (perfValidCount > (StoreQueueSize.U / 4.U)) & (perfValidCount <= (StoreQueueSize.U / 2.U))),
    ("stq_3_4_valid  ", (perfValidCount > (StoreQueueSize.U / 2.U)) & (perfValidCount <= (StoreQueueSize.U * 3.U / 4.U))),
    ("stq_4_4_valid  ", (perfValidCount > (StoreQueueSize.U * 3.U / 4.U))),
  )
  generatePerfEvent()

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt(0).flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  // flags: v = allocated, a = addrvalid, d = datavalid, c = committed, p = pending, m = mmio
  for (i <- 0 until StoreQueueSize) {
    XSDebug(s"$i: pc %x va %x pa %x data %x ",
      uop(i).pc,
      debug_vaddr(i),
      debug_paddr(i),
      debug_data(i)
    )
    PrintFlag(allocated(i), "v")
    PrintFlag(allocated(i) && addrvalid(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "d")
    PrintFlag(allocated(i) && committed(i), "c")
    PrintFlag(allocated(i) && pending(i), "p")
    PrintFlag(allocated(i) && mmio(i), "m")
    XSDebug(false, true.B, "\n")
  }

}