1/*************************************************************************************** 2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences 3* Copyright (c) 2020-2021 Peng Cheng Laboratory 4* 5* XiangShan is licensed under Mulan PSL v2. 6* You can use this software according to the terms and conditions of the Mulan PSL v2. 7* You may obtain a copy of Mulan PSL v2 at: 8* http://license.coscl.org.cn/MulanPSL2 9* 10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, 11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, 12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 13* 14* See the Mulan PSL v2 for more details. 15***************************************************************************************/ 16 17package xiangshan.mem 18 19import org.chipsalliance.cde.config.Parameters 20import chisel3._ 21import chisel3.util._ 22import xiangshan._ 23import utils._ 24import utility._ 25import xiangshan.cache._ 26import xiangshan.mem._ 27import xiangshan.backend.Bundles.DynInst 28import difftest._ 29import freechips.rocketchip.util._ 30import xiangshan.backend.fu.FuType._ 31 32class SbufferFlushBundle extends Bundle { 33 val valid = Output(Bool()) 34 val empty = Input(Bool()) 35} 36 37trait HasSbufferConst extends HasXSParameter { 38 val EvictCycles = 1 << 20 39 val SbufferReplayDelayCycles = 16 40 require(isPow2(EvictCycles)) 41 val EvictCountBits = log2Up(EvictCycles+1) 42 val MissqReplayCountBits = log2Up(SbufferReplayDelayCycles) + 1 43 44 // dcache write hit resp has 2 sources 45 // refill pipe resp and main pipe resp (fixed:only main pipe resp) 46 // val NumDcacheWriteResp = 2 // hardcoded 47 val NumDcacheWriteResp = 1 // hardcoded 48 49 val SbufferIndexWidth: Int = log2Up(StoreBufferSize) 50 // paddr = ptag + offset 51 val CacheLineBytes: Int = CacheLineSize / 8 52 val CacheLineWords: Int = CacheLineBytes / DataBytes 53 val OffsetWidth: Int = log2Up(CacheLineBytes) 54 val WordsWidth: Int = log2Up(CacheLineWords) 55 val PTagWidth: Int = PAddrBits - OffsetWidth 56 val VTagWidth: Int = VAddrBits - OffsetWidth 57 val WordOffsetWidth: Int = PAddrBits - WordsWidth 58 59 val CacheLineVWords: Int = CacheLineBytes / VDataBytes 60 val VWordsWidth: Int = log2Up(CacheLineVWords) 61 val VWordWidth: Int = log2Up(VDataBytes) 62 val VWordOffsetWidth: Int = PAddrBits - VWordWidth 63} 64 65class SbufferEntryState (implicit p: Parameters) extends SbufferBundle { 66 val state_valid = Bool() // this entry is active 67 val state_inflight = Bool() // sbuffer is trying to write this entry to dcache 68 val w_timeout = Bool() // with timeout resp, waiting for resend store pipeline req timeout 69 val w_sameblock_inflight = Bool() // same cache block dcache req is inflight 70 71 def isInvalid(): Bool = !state_valid 72 def isValid(): Bool = state_valid 73 def isActive(): Bool = state_valid && !state_inflight 74 def isInflight(): Bool = state_inflight 75 def isDcacheReqCandidate(): Bool = state_valid && !state_inflight && !w_sameblock_inflight 76} 77 78class SbufferBundle(implicit p: Parameters) extends XSBundle with HasSbufferConst 79 80class DataWriteReq(implicit p: Parameters) extends SbufferBundle { 81 // univerisal writemask 82 val wvec = UInt(StoreBufferSize.W) 83 // 2 cycle update 84 val mask = UInt((VLEN/8).W) 85 val data = UInt(VLEN.W) 86 val vwordOffset = UInt(VWordOffsetWidth.W) 87 val wline = Bool() // write full cacheline 88} 89 90class MaskFlushReq(implicit p: Parameters) extends SbufferBundle { 91 // univerisal writemask 92 val wvec = UInt(StoreBufferSize.W) 93} 94 95class SbufferData(implicit p: Parameters) extends XSModule with HasSbufferConst { 96 val io = IO(new Bundle(){ 97 // update data and mask when alloc or merge 98 val writeReq = Vec(EnsbufferWidth, Flipped(ValidIO(new DataWriteReq))) 99 // clean mask when deq 100 val maskFlushReq = Vec(NumDcacheWriteResp, Flipped(ValidIO(new MaskFlushReq))) 101 val dataOut = Output(Vec(StoreBufferSize, Vec(CacheLineVWords, Vec(VDataBytes, UInt(8.W))))) 102 val maskOut = Output(Vec(StoreBufferSize, Vec(CacheLineVWords, Vec(VDataBytes, Bool())))) 103 }) 104 105 val data = Reg(Vec(StoreBufferSize, Vec(CacheLineVWords, Vec(VDataBytes, UInt(8.W))))) 106 // val mask = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool())))) 107 val mask = RegInit( 108 VecInit(Seq.fill(StoreBufferSize)( 109 VecInit(Seq.fill(CacheLineVWords)( 110 VecInit(Seq.fill(VDataBytes)(false.B)) 111 )) 112 )) 113 ) 114 115 // 2 cycle line mask clean 116 for(line <- 0 until StoreBufferSize){ 117 val line_mask_clean_flag = GatedValidRegNext( 118 io.maskFlushReq.map(a => a.valid && a.bits.wvec(line)).reduce(_ || _) 119 ) 120 line_mask_clean_flag.suggestName("line_mask_clean_flag_"+line) 121 when(line_mask_clean_flag){ 122 for(word <- 0 until CacheLineVWords){ 123 for(byte <- 0 until VDataBytes){ 124 mask(line)(word)(byte) := false.B 125 } 126 } 127 } 128 } 129 130 // 2 cycle data / mask update 131 for(i <- 0 until EnsbufferWidth) { 132 val req = io.writeReq(i) 133 for(line <- 0 until StoreBufferSize){ 134 val sbuffer_in_s1_line_wen = req.valid && req.bits.wvec(line) 135 val sbuffer_in_s2_line_wen = GatedValidRegNext(sbuffer_in_s1_line_wen) 136 val line_write_buffer_data = RegEnable(req.bits.data, sbuffer_in_s1_line_wen) 137 val line_write_buffer_wline = RegEnable(req.bits.wline, sbuffer_in_s1_line_wen) 138 val line_write_buffer_mask = RegEnable(req.bits.mask, sbuffer_in_s1_line_wen) 139 val line_write_buffer_offset = RegEnable(req.bits.vwordOffset(VWordsWidth-1, 0), sbuffer_in_s1_line_wen) 140 sbuffer_in_s1_line_wen.suggestName("sbuffer_in_s1_line_wen_"+line) 141 sbuffer_in_s2_line_wen.suggestName("sbuffer_in_s2_line_wen_"+line) 142 line_write_buffer_data.suggestName("line_write_buffer_data_"+line) 143 line_write_buffer_wline.suggestName("line_write_buffer_wline_"+line) 144 line_write_buffer_mask.suggestName("line_write_buffer_mask_"+line) 145 line_write_buffer_offset.suggestName("line_write_buffer_offset_"+line) 146 for(word <- 0 until CacheLineVWords){ 147 for(byte <- 0 until VDataBytes){ 148 val write_byte = sbuffer_in_s2_line_wen && ( 149 line_write_buffer_mask(byte) && (line_write_buffer_offset === word.U) || 150 line_write_buffer_wline 151 ) 152 when(write_byte){ 153 data(line)(word)(byte) := line_write_buffer_data(byte*8+7, byte*8) 154 mask(line)(word)(byte) := true.B 155 } 156 } 157 } 158 } 159 } 160 161 // 1 cycle line mask clean 162 // for(i <- 0 until EnsbufferWidth) { 163 // val req = io.writeReq(i) 164 // when(req.valid){ 165 // for(line <- 0 until StoreBufferSize){ 166 // when( 167 // req.bits.wvec(line) && 168 // req.bits.cleanMask 169 // ){ 170 // for(word <- 0 until CacheLineWords){ 171 // for(byte <- 0 until DataBytes){ 172 // mask(line)(word)(byte) := false.B 173 // val debug_last_cycle_write_byte = RegNext(req.valid && req.bits.wvec(line) && ( 174 // req.bits.mask(byte) && (req.bits.wordOffset(WordsWidth-1, 0) === word.U) || 175 // req.bits.wline 176 // )) 177 // assert(!debug_last_cycle_write_byte) 178 // } 179 // } 180 // } 181 // } 182 // } 183 // } 184 185 io.dataOut := data 186 io.maskOut := mask 187} 188 189class Sbuffer(implicit p: Parameters) 190 extends DCacheModule 191 with HasSbufferConst 192 with HasPerfEvents { 193 val io = IO(new Bundle() { 194 val hartId = Input(UInt(hartIdLen.W)) 195 val in = Vec(EnsbufferWidth, Flipped(Decoupled(new DCacheWordReqWithVaddrAndPfFlag))) //Todo: store logic only support Width == 2 now 196 val vecDifftestInfo = Vec(EnsbufferWidth, Flipped(Decoupled(new DynInst))) 197 val dcache = Flipped(new DCacheToSbufferIO) 198 val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO)) 199 val sqempty = Input(Bool()) 200 val flush = Flipped(new SbufferFlushBundle) 201 val csrCtrl = Flipped(new CustomCSRCtrlIO) 202 val store_prefetch = Vec(StorePipelineWidth, DecoupledIO(new StorePrefetchReq)) // to dcache 203 val memSetPattenDetected = Input(Bool()) 204 val force_write = Input(Bool()) 205 }) 206 207 val dataModule = Module(new SbufferData) 208 dataModule.io.writeReq <> DontCare 209 val prefetcher = Module(new StorePfWrapper()) 210 val writeReq = dataModule.io.writeReq 211 212 val ptag = Reg(Vec(StoreBufferSize, UInt(PTagWidth.W))) 213 val vtag = Reg(Vec(StoreBufferSize, UInt(VTagWidth.W))) 214 val debug_mask = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool())))) 215 val waitInflightMask = Reg(Vec(StoreBufferSize, UInt(StoreBufferSize.W))) 216 val data = dataModule.io.dataOut 217 val mask = dataModule.io.maskOut 218 val stateVec = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U.asTypeOf(new SbufferEntryState)))) 219 val cohCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(EvictCountBits.W)))) 220 val missqReplayCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(MissqReplayCountBits.W)))) 221 222 val sbuffer_out_s0_fire = Wire(Bool()) 223 224 /* 225 idle --[flush] --> drain --[buf empty]--> idle 226 --[buf full]--> replace --[dcache resp]--> idle 227 */ 228 // x_drain_all: drain store queue and sbuffer 229 // x_drain_sbuffer: drain sbuffer only, block store queue to sbuffer write 230 val x_idle :: x_replace :: x_drain_all :: x_drain_sbuffer :: Nil = Enum(4) 231 def needDrain(state: UInt): Bool = 232 state(1) 233 val sbuffer_state = RegInit(x_idle) 234 235 // ---------------------- Store Enq Sbuffer --------------------- 236 237 def getPTag(pa: UInt): UInt = 238 pa(PAddrBits - 1, PAddrBits - PTagWidth) 239 240 def getVTag(va: UInt): UInt = 241 va(VAddrBits - 1, VAddrBits - VTagWidth) 242 243 def getWord(pa: UInt): UInt = 244 pa(PAddrBits-1, 3) 245 246 def getVWord(pa: UInt): UInt = 247 pa(PAddrBits-1, 4) 248 249 def getWordOffset(pa: UInt): UInt = 250 pa(OffsetWidth-1, 3) 251 252 def getVWordOffset(pa: UInt): UInt = 253 pa(OffsetWidth-1, 4) 254 255 def getAddr(ptag: UInt): UInt = 256 Cat(ptag, 0.U((PAddrBits - PTagWidth).W)) 257 258 def getByteOffset(offect: UInt): UInt = 259 Cat(offect(OffsetWidth - 1, 3), 0.U(3.W)) 260 261 def isOneOf(key: UInt, seq: Seq[UInt]): Bool = 262 if(seq.isEmpty) false.B else Cat(seq.map(_===key)).orR 263 264 def widthMap[T <: Data](f: Int => T) = (0 until StoreBufferSize) map f 265 266 // sbuffer entry count 267 268 val plru = new ValidPseudoLRU(StoreBufferSize) 269 val accessIdx = Wire(Vec(EnsbufferWidth + 1, Valid(UInt(SbufferIndexWidth.W)))) 270 271 val candidateVec = VecInit(stateVec.map(s => s.isDcacheReqCandidate())) 272 273 val replaceAlgoIdx = plru.way(candidateVec.reverse)._2 274 val replaceAlgoNotDcacheCandidate = !stateVec(replaceAlgoIdx).isDcacheReqCandidate() 275 276 assert(!(candidateVec.asUInt.orR && replaceAlgoNotDcacheCandidate), "we have way to select, but replace algo selects invalid way") 277 278 val replaceIdx = replaceAlgoIdx 279 plru.access(accessIdx) 280 281 //-------------------------cohCount----------------------------- 282 // insert and merge: cohCount=0 283 // every cycle cohCount+=1 284 // if cohCount(EvictCountBits-1)==1, evict 285 val cohTimeOutMask = VecInit(widthMap(i => cohCount(i)(EvictCountBits - 1) && stateVec(i).isActive())) 286 val (cohTimeOutIdx, cohHasTimeOut) = PriorityEncoderWithFlag(cohTimeOutMask) 287 val cohTimeOutOH = PriorityEncoderOH(cohTimeOutMask) 288 val missqReplayTimeOutMask = VecInit(widthMap(i => missqReplayCount(i)(MissqReplayCountBits - 1) && stateVec(i).w_timeout)) 289 val (missqReplayTimeOutIdxGen, missqReplayHasTimeOutGen) = PriorityEncoderWithFlag(missqReplayTimeOutMask) 290 val missqReplayHasTimeOut = GatedValidRegNext(missqReplayHasTimeOutGen) && !GatedValidRegNext(sbuffer_out_s0_fire) 291 val missqReplayTimeOutIdx = RegEnable(missqReplayTimeOutIdxGen, missqReplayHasTimeOutGen) 292 293 //-------------------------sbuffer enqueue----------------------------- 294 295 // Now sbuffer enq logic is divided into 3 stages: 296 297 // sbuffer_in_s0: 298 // * read data and meta from store queue 299 // * store them in 2 entry fifo queue 300 301 // sbuffer_in_s1: 302 // * read data and meta from fifo queue 303 // * update sbuffer meta (vtag, ptag, flag) 304 // * prevert that line from being sent to dcache (add a block condition) 305 // * prepare cacheline level write enable signal, RegNext() data and mask 306 307 // sbuffer_in_s2: 308 // * use cacheline level buffer to update sbuffer data and mask 309 // * remove dcache write block (if there is) 310 311 val activeMask = VecInit(stateVec.map(s => s.isActive())) 312 val validMask = VecInit(stateVec.map(s => s.isValid())) 313 val drainIdx = PriorityEncoder(activeMask) 314 315 val inflightMask = VecInit(stateVec.map(s => s.isInflight())) 316 317 val inptags = io.in.map(in => getPTag(in.bits.addr)) 318 val invtags = io.in.map(in => getVTag(in.bits.vaddr)) 319 val sameTag = inptags(0) === inptags(1) 320 val firstWord = getVWord(io.in(0).bits.addr) 321 val secondWord = getVWord(io.in(1).bits.addr) 322 // merge condition 323 val mergeMask = Wire(Vec(EnsbufferWidth, Vec(StoreBufferSize, Bool()))) 324 val mergeIdx = mergeMask.map(PriorityEncoder(_)) // avoid using mergeIdx for better timing 325 val canMerge = mergeMask.map(ParallelOR(_)) 326 val mergeVec = mergeMask.map(_.asUInt) 327 328 for(i <- 0 until EnsbufferWidth){ 329 mergeMask(i) := widthMap(j => 330 inptags(i) === ptag(j) && activeMask(j) 331 ) 332 assert(!(PopCount(mergeMask(i).asUInt) > 1.U && io.in(i).fire && io.in(i).bits.vecValid)) 333 } 334 335 // insert condition 336 // firstInsert: the first invalid entry 337 // if first entry canMerge or second entry has the same ptag with the first entry, 338 // secondInsert equal the first invalid entry, otherwise, the second invalid entry 339 val invalidMask = VecInit(stateVec.map(s => s.isInvalid())) 340 val evenInvalidMask = GetEvenBits(invalidMask.asUInt) 341 val oddInvalidMask = GetOddBits(invalidMask.asUInt) 342 343 def getFirstOneOH(input: UInt): UInt = { 344 assert(input.getWidth > 1) 345 val output = WireInit(VecInit(input.asBools)) 346 (1 until input.getWidth).map(i => { 347 output(i) := !input(i - 1, 0).orR && input(i) 348 }) 349 output.asUInt 350 } 351 352 val evenRawInsertVec = getFirstOneOH(evenInvalidMask) 353 val oddRawInsertVec = getFirstOneOH(oddInvalidMask) 354 val (evenRawInsertIdx, evenCanInsert) = PriorityEncoderWithFlag(evenInvalidMask) 355 val (oddRawInsertIdx, oddCanInsert) = PriorityEncoderWithFlag(oddInvalidMask) 356 val evenInsertIdx = Cat(evenRawInsertIdx, 0.U(1.W)) // slow to generate, for debug only 357 val oddInsertIdx = Cat(oddRawInsertIdx, 1.U(1.W)) // slow to generate, for debug only 358 val evenInsertVec = GetEvenBits.reverse(evenRawInsertVec) 359 val oddInsertVec = GetOddBits.reverse(oddRawInsertVec) 360 361 val enbufferSelReg = RegInit(false.B) 362 when(io.in(0).valid) { 363 enbufferSelReg := ~enbufferSelReg 364 } 365 366 val firstInsertIdx = Mux(enbufferSelReg, evenInsertIdx, oddInsertIdx) // slow to generate, for debug only 367 val secondInsertIdx = Mux(sameTag, 368 firstInsertIdx, 369 Mux(~enbufferSelReg, evenInsertIdx, oddInsertIdx) 370 ) // slow to generate, for debug only 371 val firstInsertVec = Mux(enbufferSelReg, evenInsertVec, oddInsertVec) 372 val secondInsertVec = Mux(sameTag, 373 firstInsertVec, 374 Mux(~enbufferSelReg, evenInsertVec, oddInsertVec) 375 ) // slow to generate, for debug only 376 val firstCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(enbufferSelReg, evenCanInsert, oddCanInsert) 377 val secondCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(sameTag, 378 firstCanInsert, 379 Mux(~enbufferSelReg, evenCanInsert, oddCanInsert) 380 ) && (EnsbufferWidth >= 1).B 381 val forward_need_uarch_drain = WireInit(false.B) 382 val merge_need_uarch_drain = WireInit(false.B) 383 val do_uarch_drain = GatedValidRegNext(forward_need_uarch_drain) || GatedValidRegNext(GatedValidRegNext(merge_need_uarch_drain)) 384 XSPerfAccumulate("do_uarch_drain", do_uarch_drain) 385 386 io.in(0).ready := firstCanInsert 387 io.in(1).ready := secondCanInsert && io.in(0).ready 388 389 for (i <- 0 until EnsbufferWidth) { 390 // train 391 if (EnableStorePrefetchSPB) { 392 prefetcher.io.sbuffer_enq(i).valid := io.in(i).fire && io.in(i).bits.vecValid 393 prefetcher.io.sbuffer_enq(i).bits := DontCare 394 prefetcher.io.sbuffer_enq(i).bits.vaddr := io.in(i).bits.vaddr 395 } else { 396 prefetcher.io.sbuffer_enq(i).valid := false.B 397 prefetcher.io.sbuffer_enq(i).bits := DontCare 398 } 399 400 // prefetch req 401 if (EnableStorePrefetchAtCommit) { 402 if (EnableAtCommitMissTrigger) { 403 io.store_prefetch(i).valid := prefetcher.io.prefetch_req(i).valid || (io.in(i).fire && io.in(i).bits.vecValid && io.in(i).bits.prefetch) 404 } else { 405 io.store_prefetch(i).valid := prefetcher.io.prefetch_req(i).valid || (io.in(i).fire && io.in(i).bits.vecValid) 406 } 407 io.store_prefetch(i).bits.paddr := DontCare 408 io.store_prefetch(i).bits.vaddr := Mux(prefetcher.io.prefetch_req(i).valid, prefetcher.io.prefetch_req(i).bits.vaddr, io.in(i).bits.vaddr) 409 prefetcher.io.prefetch_req(i).ready := io.store_prefetch(i).ready 410 } else { 411 io.store_prefetch(i) <> prefetcher.io.prefetch_req(i) 412 } 413 io.store_prefetch zip prefetcher.io.prefetch_req drop 2 foreach (x => x._1 <> x._2) 414 } 415 prefetcher.io.memSetPattenDetected := io.memSetPattenDetected 416 417 def wordReqToBufLine( // allocate a new line in sbuffer 418 req: DCacheWordReq, 419 reqptag: UInt, 420 reqvtag: UInt, 421 insertIdx: UInt, 422 insertVec: UInt, 423 wordOffset: UInt 424 ): Unit = { 425 assert(UIntToOH(insertIdx) === insertVec) 426 val sameBlockInflightMask = genSameBlockInflightMask(reqptag) 427 (0 until StoreBufferSize).map(entryIdx => { 428 when(insertVec(entryIdx)){ 429 stateVec(entryIdx).state_valid := true.B 430 stateVec(entryIdx).w_sameblock_inflight := sameBlockInflightMask.orR // set w_sameblock_inflight when a line is first allocated 431 when(sameBlockInflightMask.orR){ 432 waitInflightMask(entryIdx) := sameBlockInflightMask 433 } 434 cohCount(entryIdx) := 0.U 435 // missqReplayCount(insertIdx) := 0.U 436 ptag(entryIdx) := reqptag 437 vtag(entryIdx) := reqvtag // update vtag if a new sbuffer line is allocated 438 } 439 }) 440 } 441 442 def mergeWordReq( // merge write req into an existing line 443 req: DCacheWordReq, 444 reqptag: UInt, 445 reqvtag: UInt, 446 mergeIdx: UInt, 447 mergeVec: UInt, 448 wordOffset: UInt 449 ): Unit = { 450 assert(UIntToOH(mergeIdx) === mergeVec) 451 (0 until StoreBufferSize).map(entryIdx => { 452 when(mergeVec(entryIdx)) { 453 cohCount(entryIdx) := 0.U 454 // missqReplayCount(entryIdx) := 0.U 455 // check if vtag is the same, if not, trigger sbuffer flush 456 when(reqvtag =/= vtag(entryIdx)) { 457 XSDebug("reqvtag =/= sbufvtag req(vtag %x ptag %x) sbuffer(vtag %x ptag %x)\n", 458 reqvtag << OffsetWidth, 459 reqptag << OffsetWidth, 460 vtag(entryIdx) << OffsetWidth, 461 ptag(entryIdx) << OffsetWidth 462 ) 463 merge_need_uarch_drain := true.B 464 } 465 } 466 }) 467 } 468 469 for(((in, vwordOffset), i) <- io.in.zip(Seq(firstWord, secondWord)).zipWithIndex){ 470 writeReq(i).valid := in.fire && in.bits.vecValid 471 writeReq(i).bits.vwordOffset := vwordOffset 472 writeReq(i).bits.mask := in.bits.mask 473 writeReq(i).bits.data := in.bits.data 474 writeReq(i).bits.wline := in.bits.wline 475 val debug_insertIdx = if(i == 0) firstInsertIdx else secondInsertIdx 476 val insertVec = if(i == 0) firstInsertVec else secondInsertVec 477 assert(!((PopCount(insertVec) > 1.U) && in.fire && in.bits.vecValid)) 478 val insertIdx = OHToUInt(insertVec) 479 val accessValid = in.fire && in.bits.vecValid 480 accessIdx(i).valid := RegNext(accessValid) 481 accessIdx(i).bits := RegEnable(Mux(canMerge(i), mergeIdx(i), insertIdx), accessValid) 482 when(accessValid){ 483 when(canMerge(i)){ 484 writeReq(i).bits.wvec := mergeVec(i) 485 mergeWordReq(in.bits, inptags(i), invtags(i), mergeIdx(i), mergeVec(i), vwordOffset) 486 XSDebug(p"merge req $i to line [${mergeIdx(i)}]\n") 487 }.otherwise({ 488 writeReq(i).bits.wvec := insertVec 489 wordReqToBufLine(in.bits, inptags(i), invtags(i), insertIdx, insertVec, vwordOffset) 490 XSDebug(p"insert req $i to line[$insertIdx]\n") 491 assert(debug_insertIdx === insertIdx) 492 }) 493 } 494 } 495 496 497 for(i <- 0 until StoreBufferSize){ 498 XSDebug(stateVec(i).isValid(), 499 p"[$i] timeout:${cohCount(i)(EvictCountBits-1)} state:${stateVec(i)}\n" 500 ) 501 } 502 503 for((req, i) <- io.in.zipWithIndex){ 504 XSDebug(req.fire && req.bits.vecValid, 505 p"accept req [$i]: " + 506 p"addr:${Hexadecimal(req.bits.addr)} " + 507 p"mask:${Binary(shiftMaskToLow(req.bits.addr,req.bits.mask))} " + 508 p"data:${Hexadecimal(shiftDataToLow(req.bits.addr,req.bits.data))}\n" 509 ) 510 XSDebug(req.valid && !req.ready, 511 p"req [$i] blocked by sbuffer\n" 512 ) 513 } 514 515 // for now, when enq, trigger a prefetch (if EnableAtCommitMissTrigger) 516 require(EnsbufferWidth <= StorePipelineWidth) 517 518 // ---------------------- Send Dcache Req --------------------- 519 520 val sbuffer_empty = Cat(invalidMask).andR 521 val sq_empty = !Cat(io.in.map(_.valid)).orR 522 val empty = sbuffer_empty && sq_empty 523 val threshold = Wire(UInt(5.W)) // RegNext(io.csrCtrl.sbuffer_threshold +& 1.U) 524 threshold := Constantin.createRecord(s"StoreBufferThreshold_${p(XSCoreParamsKey).HartId}", initValue = 7) 525 val base = Wire(UInt(5.W)) 526 base := Constantin.createRecord(s"StoreBufferBase_${p(XSCoreParamsKey).HartId}", initValue = 4) 527 val ActiveCount = PopCount(activeMask) 528 val ValidCount = PopCount(validMask) 529 val forceThreshold = Mux(io.force_write, threshold - base, threshold) 530 val do_eviction = GatedValidRegNext(ActiveCount >= forceThreshold || ActiveCount === (StoreBufferSize-1).U || ValidCount === (StoreBufferSize).U, init = false.B) 531 require((StoreBufferThreshold + 1) <= StoreBufferSize) 532 533 XSDebug(p"ActiveCount[$ActiveCount]\n") 534 535 io.flush.empty := GatedValidRegNext(empty && io.sqempty) 536 // lru.io.flush := sbuffer_state === x_drain_all && empty 537 switch(sbuffer_state){ 538 is(x_idle){ 539 when(io.flush.valid){ 540 sbuffer_state := x_drain_all 541 }.elsewhen(do_uarch_drain){ 542 sbuffer_state := x_drain_sbuffer 543 }.elsewhen(do_eviction){ 544 sbuffer_state := x_replace 545 } 546 } 547 is(x_drain_all){ 548 when(empty){ 549 sbuffer_state := x_idle 550 } 551 } 552 is(x_drain_sbuffer){ 553 when(io.flush.valid){ 554 sbuffer_state := x_drain_all 555 }.elsewhen(sbuffer_empty){ 556 sbuffer_state := x_idle 557 } 558 } 559 is(x_replace){ 560 when(io.flush.valid){ 561 sbuffer_state := x_drain_all 562 }.elsewhen(do_uarch_drain){ 563 sbuffer_state := x_drain_sbuffer 564 }.elsewhen(!do_eviction){ 565 sbuffer_state := x_idle 566 } 567 } 568 } 569 XSDebug(p"sbuffer state:${sbuffer_state} do eviction:${do_eviction} empty:${empty}\n") 570 571 def noSameBlockInflight(idx: UInt): Bool = { 572 // stateVec(idx) itself must not be s_inflight 573 !Cat(widthMap(i => inflightMask(i) && ptag(idx) === ptag(i))).orR 574 } 575 576 def genSameBlockInflightMask(ptag_in: UInt): UInt = { 577 val mask = VecInit(widthMap(i => inflightMask(i) && ptag_in === ptag(i))).asUInt // quite slow, use it with care 578 assert(!(PopCount(mask) > 1.U)) 579 mask 580 } 581 582 def haveSameBlockInflight(ptag_in: UInt): Bool = { 583 genSameBlockInflightMask(ptag_in).orR 584 } 585 586 // --------------------------------------------------------------------------- 587 // sbuffer to dcache pipeline 588 // --------------------------------------------------------------------------- 589 590 // Now sbuffer deq logic is divided into 2 stages: 591 592 // sbuffer_out_s0: 593 // * read data and meta from sbuffer 594 // * RegNext() them 595 // * set line state to inflight 596 597 // sbuffer_out_s1: 598 // * send write req to dcache 599 600 // sbuffer_out_extra: 601 // * receive write result from dcache 602 // * update line state 603 604 val sbuffer_out_s1_ready = Wire(Bool()) 605 606 // --------------------------------------------------------------------------- 607 // sbuffer_out_s0 608 // --------------------------------------------------------------------------- 609 610 val need_drain = needDrain(sbuffer_state) 611 val need_replace = do_eviction || (sbuffer_state === x_replace) 612 val sbuffer_out_s0_evictionIdx = Mux(missqReplayHasTimeOut, 613 missqReplayTimeOutIdx, 614 Mux(need_drain, 615 drainIdx, 616 Mux(cohHasTimeOut, cohTimeOutIdx, replaceIdx) 617 ) 618 ) 619 620 // If there is a inflight dcache req which has same ptag with sbuffer_out_s0_evictionIdx's ptag, 621 // current eviction should be blocked. 622 val sbuffer_out_s0_valid = missqReplayHasTimeOut || 623 stateVec(sbuffer_out_s0_evictionIdx).isDcacheReqCandidate() && 624 (need_drain || cohHasTimeOut || need_replace) 625 assert(!( 626 stateVec(sbuffer_out_s0_evictionIdx).isDcacheReqCandidate && 627 !noSameBlockInflight(sbuffer_out_s0_evictionIdx) 628 )) 629 val sbuffer_out_s0_cango = sbuffer_out_s1_ready 630 sbuffer_out_s0_fire := sbuffer_out_s0_valid && sbuffer_out_s0_cango 631 632 // --------------------------------------------------------------------------- 633 // sbuffer_out_s1 634 // --------------------------------------------------------------------------- 635 636 // TODO: use EnsbufferWidth 637 val shouldWaitWriteFinish = GatedValidRegNext(VecInit((0 until EnsbufferWidth).map{i => 638 (writeReq(i).bits.wvec.asUInt & UIntToOH(sbuffer_out_s0_evictionIdx).asUInt).orR && 639 writeReq(i).valid 640 }).asUInt.orR) 641 // block dcache write if read / write hazard 642 val blockDcacheWrite = shouldWaitWriteFinish 643 644 val sbuffer_out_s1_valid = RegInit(false.B) 645 sbuffer_out_s1_ready := io.dcache.req.ready && !blockDcacheWrite || !sbuffer_out_s1_valid 646 val sbuffer_out_s1_fire = io.dcache.req.fire 647 648 // when sbuffer_out_s1_fire, send dcache req stored in pipeline reg to dcache 649 when(sbuffer_out_s1_fire){ 650 sbuffer_out_s1_valid := false.B 651 } 652 // when sbuffer_out_s0_fire, read dcache req data and store them in a pipeline reg 653 when(sbuffer_out_s0_cango){ 654 sbuffer_out_s1_valid := sbuffer_out_s0_valid 655 } 656 when(sbuffer_out_s0_fire){ 657 stateVec(sbuffer_out_s0_evictionIdx).state_inflight := true.B 658 stateVec(sbuffer_out_s0_evictionIdx).w_timeout := false.B 659 // stateVec(sbuffer_out_s0_evictionIdx).s_pipe_req := true.B 660 XSDebug(p"$sbuffer_out_s0_evictionIdx will be sent to Dcache\n") 661 } 662 663 XSDebug(p"need drain:$need_drain cohHasTimeOut: $cohHasTimeOut need replace:$need_replace\n") 664 XSDebug(p"drainIdx:$drainIdx tIdx:$cohTimeOutIdx replIdx:$replaceIdx " + 665 p"blocked:${!noSameBlockInflight(sbuffer_out_s0_evictionIdx)} v:${activeMask(sbuffer_out_s0_evictionIdx)}\n") 666 XSDebug(p"sbuffer_out_s0_valid:$sbuffer_out_s0_valid evictIdx:$sbuffer_out_s0_evictionIdx dcache ready:${io.dcache.req.ready}\n") 667 // Note: if other dcache req in the same block are inflight, 668 // the lru update may not accurate 669 accessIdx(EnsbufferWidth).valid := invalidMask(replaceIdx) || ( 670 need_replace && !need_drain && !cohHasTimeOut && !missqReplayHasTimeOut && sbuffer_out_s0_cango && activeMask(replaceIdx)) 671 accessIdx(EnsbufferWidth).bits := replaceIdx 672 val sbuffer_out_s1_evictionIdx = RegEnable(sbuffer_out_s0_evictionIdx, sbuffer_out_s0_fire) 673 val sbuffer_out_s1_evictionPTag = RegEnable(ptag(sbuffer_out_s0_evictionIdx), sbuffer_out_s0_fire) 674 val sbuffer_out_s1_evictionVTag = RegEnable(vtag(sbuffer_out_s0_evictionIdx), sbuffer_out_s0_fire) 675 676 io.dcache.req.valid := sbuffer_out_s1_valid && !blockDcacheWrite 677 io.dcache.req.bits := DontCare 678 io.dcache.req.bits.cmd := MemoryOpConstants.M_XWR 679 io.dcache.req.bits.addr := getAddr(sbuffer_out_s1_evictionPTag) 680 io.dcache.req.bits.vaddr := getAddr(sbuffer_out_s1_evictionVTag) 681 io.dcache.req.bits.data := data(sbuffer_out_s1_evictionIdx).asUInt 682 io.dcache.req.bits.mask := mask(sbuffer_out_s1_evictionIdx).asUInt 683 io.dcache.req.bits.id := sbuffer_out_s1_evictionIdx 684 685 when (sbuffer_out_s1_fire) { 686 assert(!(io.dcache.req.bits.vaddr === 0.U)) 687 assert(!(io.dcache.req.bits.addr === 0.U)) 688 } 689 690 XSDebug(sbuffer_out_s1_fire, 691 p"send buf [$sbuffer_out_s1_evictionIdx] to Dcache, req fire\n" 692 ) 693 694 // update sbuffer status according to dcache resp source 695 696 def id_to_sbuffer_id(id: UInt): UInt = { 697 require(id.getWidth >= log2Up(StoreBufferSize)) 698 id(log2Up(StoreBufferSize)-1, 0) 699 } 700 701 // hit resp 702 io.dcache.hit_resps.map(resp => { 703 val dcache_resp_id = resp.bits.id 704 when (resp.fire) { 705 stateVec(dcache_resp_id).state_inflight := false.B 706 stateVec(dcache_resp_id).state_valid := false.B 707 assert(!resp.bits.replay) 708 assert(!resp.bits.miss) // not need to resp if miss, to be opted 709 assert(stateVec(dcache_resp_id).state_inflight === true.B) 710 } 711 712 // Update w_sameblock_inflight flag is delayed for 1 cycle 713 // 714 // When a new req allocate a new line in sbuffer, sameblock_inflight check will ignore 715 // current dcache.hit_resps. Then, in the next cycle, we have plenty of time to check 716 // if the same block is still inflight 717 (0 until StoreBufferSize).map(i => { 718 when( 719 stateVec(i).w_sameblock_inflight && 720 stateVec(i).state_valid && 721 GatedValidRegNext(resp.fire) && 722 waitInflightMask(i) === UIntToOH(RegEnable(id_to_sbuffer_id(dcache_resp_id), resp.fire)) 723 ){ 724 stateVec(i).w_sameblock_inflight := false.B 725 } 726 }) 727 }) 728 729 io.dcache.hit_resps.zip(dataModule.io.maskFlushReq).map{case (resp, maskFlush) => { 730 maskFlush.valid := resp.fire 731 maskFlush.bits.wvec := UIntToOH(resp.bits.id) 732 }} 733 734 // replay resp 735 val replay_resp_id = io.dcache.replay_resp.bits.id 736 when (io.dcache.replay_resp.fire) { 737 missqReplayCount(replay_resp_id) := 0.U 738 stateVec(replay_resp_id).w_timeout := true.B 739 // waiting for timeout 740 assert(io.dcache.replay_resp.bits.replay) 741 assert(stateVec(replay_resp_id).state_inflight === true.B) 742 } 743 744 // TODO: reuse cohCount 745 (0 until StoreBufferSize).map(i => { 746 when(stateVec(i).w_timeout && stateVec(i).state_inflight && !missqReplayCount(i)(MissqReplayCountBits-1)) { 747 missqReplayCount(i) := missqReplayCount(i) + 1.U 748 } 749 when(activeMask(i) && !cohTimeOutMask(i)){ 750 cohCount(i) := cohCount(i)+1.U 751 } 752 }) 753 754 if (env.EnableDifftest) { 755 // hit resp 756 io.dcache.hit_resps.zipWithIndex.map{case (resp, index) => { 757 val difftest = DifftestModule(new DiffSbufferEvent, delay = 1) 758 val dcache_resp_id = resp.bits.id 759 difftest.coreid := io.hartId 760 difftest.index := index.U 761 difftest.valid := resp.fire 762 difftest.addr := getAddr(ptag(dcache_resp_id)) 763 difftest.data := data(dcache_resp_id).asTypeOf(Vec(CacheLineBytes, UInt(8.W))) 764 difftest.mask := mask(dcache_resp_id).asUInt 765 }} 766 } 767 768 // ---------------------- Load Data Forward --------------------- 769 val mismatch = Wire(Vec(LoadPipelineWidth, Bool())) 770 XSPerfAccumulate("vaddr_match_failed", mismatch(0) || mismatch(1)) 771 for ((forward, i) <- io.forward.zipWithIndex) { 772 val vtag_matches = VecInit(widthMap(w => vtag(w) === getVTag(forward.vaddr))) 773 // ptag_matches uses paddr from dtlb, which is far from sbuffer 774 val ptag_matches = VecInit(widthMap(w => RegEnable(ptag(w), forward.valid) === RegEnable(getPTag(forward.paddr), forward.valid))) 775 val tag_matches = vtag_matches 776 val tag_mismatch = GatedValidRegNext(forward.valid) && VecInit(widthMap(w => 777 GatedValidRegNext(vtag_matches(w)) =/= ptag_matches(w) && GatedValidRegNext((activeMask(w) || inflightMask(w))) 778 )).asUInt.orR 779 mismatch(i) := tag_mismatch 780 when (tag_mismatch) { 781 XSDebug("forward tag mismatch: pmatch %x vmatch %x vaddr %x paddr %x\n", 782 RegNext(ptag_matches.asUInt), 783 RegNext(vtag_matches.asUInt), 784 RegNext(forward.vaddr), 785 RegNext(forward.paddr) 786 ) 787 forward_need_uarch_drain := true.B 788 } 789 val valid_tag_matches = widthMap(w => tag_matches(w) && activeMask(w)) 790 val inflight_tag_matches = widthMap(w => tag_matches(w) && inflightMask(w)) 791 val line_offset_mask = UIntToOH(getVWordOffset(forward.paddr)) 792 793 val valid_tag_match_reg = valid_tag_matches.map(RegEnable(_, forward.valid)) 794 val inflight_tag_match_reg = inflight_tag_matches.map(RegEnable(_, forward.valid)) 795 val forward_mask_candidate_reg = RegEnable( 796 VecInit(mask.map(entry => entry(getVWordOffset(forward.paddr)))), 797 forward.valid 798 ) 799 val forward_data_candidate_reg = RegEnable( 800 VecInit(data.map(entry => entry(getVWordOffset(forward.paddr)))), 801 forward.valid 802 ) 803 804 val selectedValidMask = Mux1H(valid_tag_match_reg, forward_mask_candidate_reg) 805 val selectedValidData = Mux1H(valid_tag_match_reg, forward_data_candidate_reg) 806 selectedValidMask.suggestName("selectedValidMask_"+i) 807 selectedValidData.suggestName("selectedValidData_"+i) 808 809 val selectedInflightMask = Mux1H(inflight_tag_match_reg, forward_mask_candidate_reg) 810 val selectedInflightData = Mux1H(inflight_tag_match_reg, forward_data_candidate_reg) 811 selectedInflightMask.suggestName("selectedInflightMask_"+i) 812 selectedInflightData.suggestName("selectedInflightData_"+i) 813 814 // currently not being used 815 val selectedInflightMaskFast = Mux1H(line_offset_mask, Mux1H(inflight_tag_matches, mask).asTypeOf(Vec(CacheLineVWords, Vec(VDataBytes, Bool())))) 816 val selectedValidMaskFast = Mux1H(line_offset_mask, Mux1H(valid_tag_matches, mask).asTypeOf(Vec(CacheLineVWords, Vec(VDataBytes, Bool())))) 817 818 forward.dataInvalid := false.B // data in store line merge buffer is always ready 819 forward.matchInvalid := tag_mismatch // paddr / vaddr cam result does not match 820 for (j <- 0 until VDataBytes) { 821 forward.forwardMask(j) := false.B 822 forward.forwardData(j) := DontCare 823 824 // valid entries have higher priority than inflight entries 825 when(selectedInflightMask(j)) { 826 forward.forwardMask(j) := true.B 827 forward.forwardData(j) := selectedInflightData(j) 828 } 829 when(selectedValidMask(j)) { 830 forward.forwardMask(j) := true.B 831 forward.forwardData(j) := selectedValidData(j) 832 } 833 834 forward.forwardMaskFast(j) := selectedInflightMaskFast(j) || selectedValidMaskFast(j) 835 } 836 forward.addrInvalid := DontCare 837 } 838 839 for (i <- 0 until StoreBufferSize) { 840 XSDebug("sbf entry " + i + " : ptag %x vtag %x valid %x active %x inflight %x w_timeout %x\n", 841 ptag(i) << OffsetWidth, 842 vtag(i) << OffsetWidth, 843 stateVec(i).isValid(), 844 activeMask(i), 845 inflightMask(i), 846 stateVec(i).w_timeout 847 ) 848 } 849 850 /* 851 * 852 ********************************************************** 853 * ------------- ------------- * 854 * | XiangShan | | NEMU | * 855 * ------------- ------------- * 856 * | | * 857 * V V * 858 * ----- ----- * 859 * | Q | | Q | * 860 * | U | | U | * 861 * | E | | E | * 862 * | U | | U | * 863 * | E | | E | * 864 * | | | | * 865 * ----- ----- * 866 * | | * 867 * | -------------- | * 868 * |>>>>>>>>| DIFFTEST |<<<<<<<<<| * 869 * -------------- * 870 ********************************************************** 871 */ 872 // Initialize when unenabled difftest. 873 for (i <- 0 until EnsbufferWidth) { 874 io.vecDifftestInfo(i) := DontCare 875 } 876 if (env.EnableDifftest) { 877 val VecMemFLOWMaxNumber = 16 878 879 def UIntSlice(in: UInt, High: UInt, Low: UInt): UInt = { 880 val maxNum = in.getWidth 881 val result = Wire(Vec(maxNum, Bool())) 882 883 for (i <- 0 until maxNum) { 884 when (Low + i.U <= High) { 885 result(i) := in(Low + i.U) 886 }.otherwise{ 887 result(i) := 0.U 888 } 889 } 890 891 result.asUInt 892 } 893 894 // To align with 'nemu', we need: 895 // For 'unit-store' and 'whole' vector store instr, we re-split here, 896 // and for the res, we do nothing. 897 for (i <- 0 until EnsbufferWidth) { 898 io.vecDifftestInfo(i).ready := io.in(i).ready 899 900 val uop = io.vecDifftestInfo(i).bits 901 902 val isVse = isVStore(uop.fuType) && LSUOpType.isUStride(uop.fuOpType) 903 val isVsm = isVStore(uop.fuType) && VstuType.isMasked(uop.fuOpType) 904 val isVsr = isVStore(uop.fuType) && VstuType.isWhole(uop.fuOpType) 905 906 val vpu = uop.vpu 907 val veew = uop.vpu.veew 908 val eew = EewLog2(veew) 909 val EEB = (1.U << eew).asUInt //Only when VLEN=128 effective element byte 910 val EEWBits = (EEB << 3.U).asUInt 911 val nf = Mux(isVsr, 0.U, vpu.nf) 912 913 val isSegment = nf =/= 0.U && !isVsm 914 val isVSLine = (isVse || isVsm || isVsr) && !isSegment 915 916 // The number of stores generated by a uop theroy. 917 // No other vector instructions need to be considered. 918 val flow = Mux( 919 isVSLine, 920 (16.U >> eew).asUInt, 921 0.U 922 ) 923 924 val rawData = io.in(i).bits.data 925 val rawMask = io.in(i).bits.mask 926 val rawAddr = io.in(i).bits.addr 927 928 // A common difftest interface for scalar and vector instr 929 val difftestCommon = DifftestModule(new DiffStoreEvent, delay = 2) 930 when (isVSLine) { 931 val splitMask = UIntSlice(rawMask, EEB - 1.U, 0.U)(7,0) // Byte 932 val splitData = UIntSlice(rawData, EEWBits - 1.U, 0.U)(63,0) // Double word 933 val storeCommit = io.in(i).fire && splitMask.orR && io.in(i).bits.vecValid 934 val waddr = rawAddr 935 val wmask = splitMask 936 val wdata = splitData & MaskExpand(splitMask) 937 938 difftestCommon.coreid := io.hartId 939 difftestCommon.index := (i*VecMemFLOWMaxNumber).U 940 difftestCommon.valid := storeCommit 941 difftestCommon.addr := waddr 942 difftestCommon.data := wdata 943 difftestCommon.mask := wmask 944 945 }.otherwise{ 946 val storeCommit = io.in(i).fire 947 val waddr = ZeroExt(Cat(io.in(i).bits.addr(PAddrBits - 1, 3), 0.U(3.W)), 64) 948 val sbufferMask = shiftMaskToLow(io.in(i).bits.addr, io.in(i).bits.mask) 949 val sbufferData = shiftDataToLow(io.in(i).bits.addr, io.in(i).bits.data) 950 val wmask = sbufferMask 951 val wdata = sbufferData & MaskExpand(sbufferMask) 952 953 difftestCommon.coreid := io.hartId 954 difftestCommon.index := (i*VecMemFLOWMaxNumber).U 955 difftestCommon.valid := storeCommit && io.in(i).bits.vecValid 956 difftestCommon.addr := waddr 957 difftestCommon.data := wdata 958 difftestCommon.mask := wmask 959 960 } 961 962 // Only the interface used by the 'unit-store' and 'whole' vector store instr 963 for (index <- 1 until VecMemFLOWMaxNumber) { 964 val difftest = DifftestModule(new DiffStoreEvent, delay = 2) 965 966 // I've already done something process with 'mask' outside: 967 // Different cases of 'vm' have been considered: 968 // Any valid store will definitely not have all 0 masks, 969 // and the extra part due to unaligned access must have a mask of 0 970 when (index.U < flow && isVSLine) { 971 // Make NEMU-difftest happy 972 val shiftIndex = EEB*index.U 973 val shiftFlag = shiftIndex(2,0).orR // Double word Flag 974 val shiftBytes = Mux(shiftFlag, shiftIndex(2,0), 0.U) 975 val shiftBits = shiftBytes << 3.U 976 val splitMask = UIntSlice(rawMask, (EEB*(index+1).U - 1.U), EEB*index.U)(7,0) // Byte 977 val splitData = UIntSlice(rawData, (EEWBits*(index+1).U - 1.U), EEWBits*index.U)(63,0) // Double word 978 val storeCommit = io.in(i).fire && splitMask.orR && io.in(i).bits.vecValid 979 val waddr = Cat(rawAddr(PAddrBits - 1, 4), Cat(shiftIndex(3), 0.U(3.W))) 980 val wmask = splitMask << shiftBytes 981 val wdata = (splitData & MaskExpand(splitMask)) << shiftBits 982 983 difftest.coreid := io.hartId 984 difftest.index := (i*VecMemFLOWMaxNumber+index).U 985 difftest.valid := storeCommit 986 difftest.addr := waddr 987 difftest.data := wdata 988 difftest.mask := wmask 989 990 }.otherwise{ 991 difftest.coreid := 0.U 992 difftest.index := 0.U 993 difftest.valid := 0.U 994 difftest.addr := 0.U 995 difftest.data := 0.U 996 difftest.mask := 0.U 997 998 } 999 } 1000 } 1001 } 1002 1003 val perf_valid_entry_count = RegNext(PopCount(VecInit(stateVec.map(s => !s.isInvalid())).asUInt)) 1004 XSPerfHistogram("util", perf_valid_entry_count, true.B, 0, StoreBufferSize, 1) 1005 XSPerfAccumulate("sbuffer_req_valid", PopCount(VecInit(io.in.map(_.valid)).asUInt)) 1006 XSPerfAccumulate("sbuffer_req_fire", PopCount(VecInit(io.in.map(_.fire)).asUInt)) 1007 XSPerfAccumulate("sbuffer_req_fire_vecinvalid", PopCount(VecInit(io.in.map(data => data.fire && !data.bits.vecValid)).asUInt)) 1008 XSPerfAccumulate("sbuffer_merge", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && canMerge(i)})).asUInt)) 1009 XSPerfAccumulate("sbuffer_newline", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && !canMerge(i)})).asUInt)) 1010 XSPerfAccumulate("dcache_req_valid", io.dcache.req.valid) 1011 XSPerfAccumulate("dcache_req_fire", io.dcache.req.fire) 1012 XSPerfAccumulate("sbuffer_idle", sbuffer_state === x_idle) 1013 XSPerfAccumulate("sbuffer_flush", sbuffer_state === x_drain_sbuffer) 1014 XSPerfAccumulate("sbuffer_replace", sbuffer_state === x_replace) 1015 XSPerfAccumulate("evenCanInsert", evenCanInsert) 1016 XSPerfAccumulate("oddCanInsert", oddCanInsert) 1017 XSPerfAccumulate("mainpipe_resp_valid", io.dcache.main_pipe_hit_resp.fire) 1018 //XSPerfAccumulate("refill_resp_valid", io.dcache.refill_hit_resp.fire) 1019 XSPerfAccumulate("replay_resp_valid", io.dcache.replay_resp.fire) 1020 XSPerfAccumulate("coh_timeout", cohHasTimeOut) 1021 1022 // val (store_latency_sample, store_latency) = TransactionLatencyCounter(io.lsu.req.fire, io.lsu.resp.fire) 1023 // XSPerfHistogram("store_latency", store_latency, store_latency_sample, 0, 100, 10) 1024 // XSPerfAccumulate("store_req", io.lsu.req.fire) 1025 1026 val perfEvents = Seq( 1027 ("sbuffer_req_valid ", PopCount(VecInit(io.in.map(_.valid)).asUInt) ), 1028 ("sbuffer_req_fire ", PopCount(VecInit(io.in.map(_.fire)).asUInt) ), 1029 ("sbuffer_merge ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && canMerge(i)})).asUInt) ), 1030 ("sbuffer_newline ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && !canMerge(i)})).asUInt) ), 1031 ("dcache_req_valid ", io.dcache.req.valid ), 1032 ("dcache_req_fire ", io.dcache.req.fire ), 1033 ("sbuffer_idle ", sbuffer_state === x_idle ), 1034 ("sbuffer_flush ", sbuffer_state === x_drain_sbuffer ), 1035 ("sbuffer_replace ", sbuffer_state === x_replace ), 1036 ("mpipe_resp_valid ", io.dcache.main_pipe_hit_resp.fire ), 1037 //("refill_resp_valid ", io.dcache.refill_hit_resp.fire ), 1038 ("replay_resp_valid ", io.dcache.replay_resp.fire ), 1039 ("coh_timeout ", cohHasTimeOut ), 1040 ("sbuffer_1_4_valid ", (perf_valid_entry_count < (StoreBufferSize.U/4.U)) ), 1041 ("sbuffer_2_4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/4.U)) & (perf_valid_entry_count <= (StoreBufferSize.U/2.U)) ), 1042 ("sbuffer_3_4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/2.U)) & (perf_valid_entry_count <= (StoreBufferSize.U*3.U/4.U))), 1043 ("sbuffer_full_valid", (perf_valid_entry_count > (StoreBufferSize.U*3.U/4.U))) 1044 ) 1045 generatePerfEvent() 1046 1047} 1048