xref: /XiangShan/src/main/scala/xiangshan/mem/sbuffer/Sbuffer.scala (revision 9ae95eda49bc8b88c6618e645cc447551f840434)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import utility._
import xiangshan.cache._
import xiangshan.mem._
import xiangshan.backend.Bundles.DynInst
import difftest._
import freechips.rocketchip.util._
import xiangshan.backend.fu.FuType._

class SbufferFlushBundle extends Bundle {
  val valid = Output(Bool())
  val empty = Input(Bool())
}

trait HasSbufferConst extends HasXSParameter {
  val EvictCycles = 1 << 20
  val SbufferReplayDelayCycles = 16
  require(isPow2(EvictCycles))
  val EvictCountBits = log2Up(EvictCycles+1)
  val MissqReplayCountBits = log2Up(SbufferReplayDelayCycles) + 1

  // dcache write hit resp has 2 sources
  // refill pipe resp and main pipe resp
  val NumDcacheWriteResp = 2 // hardcoded

  val SbufferIndexWidth: Int = log2Up(StoreBufferSize)
  // paddr = ptag + offset
  val CacheLineBytes: Int = CacheLineSize / 8
  val CacheLineWords: Int = CacheLineBytes / DataBytes
  val OffsetWidth: Int = log2Up(CacheLineBytes)
  val WordsWidth: Int = log2Up(CacheLineWords)
  val PTagWidth: Int = PAddrBits - OffsetWidth
  val VTagWidth: Int = VAddrBits - OffsetWidth
  val WordOffsetWidth: Int = PAddrBits - WordsWidth

  val CacheLineVWords: Int = CacheLineBytes / VDataBytes
  val VWordsWidth: Int = log2Up(CacheLineVWords)
  val VWordWidth: Int = log2Up(VDataBytes)
  val VWordOffsetWidth: Int = PAddrBits - VWordWidth
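
  // Worked example, assuming a typical config (CacheLineSize = 512, DataBytes = 8,
  // VLEN = 128 so VDataBytes = 16, PAddrBits = 36): CacheLineBytes = 64,
  // OffsetWidth = 6, PTagWidth = 30, CacheLineVWords = 4, VWordsWidth = 2, VWordWidth = 4.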
}

class SbufferEntryState (implicit p: Parameters) extends SbufferBundle {
  val state_valid    = Bool() // this entry is active
  val state_inflight = Bool() // sbuffer is trying to write this entry to dcache
  val w_timeout = Bool() // with timeout resp, waiting for resend store pipeline req timeout
  val w_sameblock_inflight = Bool() // same cache block dcache req is inflight

  def isInvalid(): Bool = !state_valid
  def isValid(): Bool = state_valid
  def isActive(): Bool = state_valid && !state_inflight
  def isInflight(): Bool = state_inflight
  def isDcacheReqCandidate(): Bool = state_valid && !state_inflight && !w_sameblock_inflight
}

class SbufferBundle(implicit p: Parameters) extends XSBundle with HasSbufferConst

class DataWriteReq(implicit p: Parameters) extends SbufferBundle {
  // universal writemask
  val wvec = UInt(StoreBufferSize.W)
  // 2 cycle update
  val mask = UInt((VLEN/8).W)
  val data = UInt(VLEN.W)
  val vwordOffset = UInt(VWordOffsetWidth.W)
  val wline = Bool() // write full cacheline
}

class MaskFlushReq(implicit p: Parameters) extends SbufferBundle {
  // universal writemask
  val wvec = UInt(StoreBufferSize.W)
}

class SbufferData(implicit p: Parameters) extends XSModule with HasSbufferConst {
  val io = IO(new Bundle(){
    // update data and mask when alloc or merge
    val writeReq = Vec(EnsbufferWidth, Flipped(ValidIO(new DataWriteReq)))
    // clean mask when deq
    val maskFlushReq = Vec(NumDcacheWriteResp, Flipped(ValidIO(new MaskFlushReq)))
    val dataOut = Output(Vec(StoreBufferSize, Vec(CacheLineVWords, Vec(VDataBytes, UInt(8.W)))))
    val maskOut = Output(Vec(StoreBufferSize, Vec(CacheLineVWords, Vec(VDataBytes, Bool()))))
  })

  val data = Reg(Vec(StoreBufferSize, Vec(CacheLineVWords, Vec(VDataBytes, UInt(8.W)))))
  // val mask = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool()))))
  val mask = RegInit(
    VecInit(Seq.fill(StoreBufferSize)(
      VecInit(Seq.fill(CacheLineVWords)(
        VecInit(Seq.fill(VDataBytes)(false.B))
      ))
    ))
  )

  // 2 cycle line mask clean
  for(line <- 0 until StoreBufferSize){
    val line_mask_clean_flag = RegNext(
      io.maskFlushReq.map(a => a.valid && a.bits.wvec(line)).reduce(_ || _)
    )
    line_mask_clean_flag.suggestName("line_mask_clean_flag_"+line)
    when(line_mask_clean_flag){
      for(word <- 0 until CacheLineVWords){
        for(byte <- 0 until VDataBytes){
          mask(line)(word)(byte) := false.B
        }
      }
    }
  }

  // 2 cycle data / mask update
  for(i <- 0 until EnsbufferWidth) {
    val req = io.writeReq(i)
    for(line <- 0 until StoreBufferSize){
      val sbuffer_in_s1_line_wen = req.valid && req.bits.wvec(line)
      val sbuffer_in_s2_line_wen = RegNext(sbuffer_in_s1_line_wen)
      val line_write_buffer_data = RegEnable(req.bits.data, sbuffer_in_s1_line_wen)
      val line_write_buffer_wline = RegEnable(req.bits.wline, sbuffer_in_s1_line_wen)
      val line_write_buffer_mask = RegEnable(req.bits.mask, sbuffer_in_s1_line_wen)
      val line_write_buffer_offset = RegEnable(req.bits.vwordOffset(VWordsWidth-1, 0), sbuffer_in_s1_line_wen)
      sbuffer_in_s1_line_wen.suggestName("sbuffer_in_s1_line_wen_"+line)
      sbuffer_in_s2_line_wen.suggestName("sbuffer_in_s2_line_wen_"+line)
      line_write_buffer_data.suggestName("line_write_buffer_data_"+line)
      line_write_buffer_wline.suggestName("line_write_buffer_wline_"+line)
      line_write_buffer_mask.suggestName("line_write_buffer_mask_"+line)
      line_write_buffer_offset.suggestName("line_write_buffer_offset_"+line)
      for(word <- 0 until CacheLineVWords){
        for(byte <- 0 until VDataBytes){
          val write_byte = sbuffer_in_s2_line_wen && (
            line_write_buffer_mask(byte) && (line_write_buffer_offset === word.U) ||
            line_write_buffer_wline
          )
          when(write_byte){
            data(line)(word)(byte) := line_write_buffer_data(byte*8+7, byte*8)
            mask(line)(word)(byte) := true.B
          }
        }
      }
    }
  }

  // 1 cycle line mask clean
  // for(i <- 0 until EnsbufferWidth) {
  //   val req = io.writeReq(i)
  //   when(req.valid){
  //     for(line <- 0 until StoreBufferSize){
  //       when(
  //         req.bits.wvec(line) &&
  //         req.bits.cleanMask
  //       ){
  //         for(word <- 0 until CacheLineWords){
  //           for(byte <- 0 until DataBytes){
  //             mask(line)(word)(byte) := false.B
  //             val debug_last_cycle_write_byte = RegNext(req.valid && req.bits.wvec(line) && (
  //               req.bits.mask(byte) && (req.bits.wordOffset(WordsWidth-1, 0) === word.U) ||
  //               req.bits.wline
  //             ))
  //             assert(!debug_last_cycle_write_byte)
  //           }
  //         }
  //       }
  //     }
  //   }
  // }

  io.dataOut := data
  io.maskOut := mask
}

class Sbuffer(implicit p: Parameters)
  extends DCacheModule
    with HasSbufferConst
    with HasPerfEvents {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(8.W))
    val in = Vec(EnsbufferWidth, Flipped(Decoupled(new DCacheWordReqWithVaddrAndPfFlag)))  // TODO: store logic only supports Width == 2 for now
    val vecDifftestInfo = Vec(EnsbufferWidth, Flipped(Decoupled(new DynInst)))
    val dcache = Flipped(new DCacheToSbufferIO)
    val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val sqempty = Input(Bool())
    val flush = Flipped(new SbufferFlushBundle)
    val csrCtrl = Flipped(new CustomCSRCtrlIO)
    val store_prefetch = Vec(StorePipelineWidth, DecoupledIO(new StorePrefetchReq)) // to dcache
    val memSetPattenDetected = Input(Bool())
    val force_write = Input(Bool())
  })

  val dataModule = Module(new SbufferData)
  dataModule.io.writeReq <> DontCare
  val prefetcher = Module(new StorePfWrapper())
  val writeReq = dataModule.io.writeReq

  val ptag = Reg(Vec(StoreBufferSize, UInt(PTagWidth.W)))
  val vtag = Reg(Vec(StoreBufferSize, UInt(VTagWidth.W)))
  val debug_mask = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool()))))
  val waitInflightMask = Reg(Vec(StoreBufferSize, UInt(StoreBufferSize.W)))
  val data = dataModule.io.dataOut
  val mask = dataModule.io.maskOut
  val stateVec = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U.asTypeOf(new SbufferEntryState))))
  val cohCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(EvictCountBits.W))))
  val missqReplayCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(MissqReplayCountBits.W))))

  val sbuffer_out_s0_fire = Wire(Bool())

  /*
       idle --[flush]   --> drain   --[buf empty]--> idle
            --[buf full]--> replace --[dcache resp]--> idle
  */
  // x_drain_all: drain store queue and sbuffer
  // x_drain_sbuffer: drain sbuffer only, block store queue to sbuffer write
  val x_idle :: x_replace :: x_drain_all :: x_drain_sbuffer :: Nil = Enum(4)
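  // Enum(4) encodes x_idle = 0, x_replace = 1, x_drain_all = 2, x_drain_sbuffer = 3,
  // so exactly the two drain states have bit 1 set; needDrain simply tests that bit.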
  def needDrain(state: UInt): Bool =
    state(1)
  val sbuffer_state = RegInit(x_idle)

  // ---------------------- Store Enq Sbuffer ---------------------

  def getPTag(pa: UInt): UInt =
    pa(PAddrBits - 1, PAddrBits - PTagWidth)

  def getVTag(va: UInt): UInt =
    va(VAddrBits - 1, VAddrBits - VTagWidth)

  def getWord(pa: UInt): UInt =
    pa(PAddrBits-1, 3)

  def getVWord(pa: UInt): UInt =
    pa(PAddrBits-1, 4)

  def getWordOffset(pa: UInt): UInt =
    pa(OffsetWidth-1, 3)

  def getVWordOffset(pa: UInt): UInt =
    pa(OffsetWidth-1, 4)

  def getAddr(ptag: UInt): UInt =
    Cat(ptag, 0.U((PAddrBits - PTagWidth).W))

  def getByteOffset(offset: UInt): UInt =
    Cat(offset(OffsetWidth - 1, 3), 0.U(3.W))

  def isOneOf(key: UInt, seq: Seq[UInt]): Bool =
    if(seq.isEmpty) false.B else Cat(seq.map(_===key)).orR

  def widthMap[T <: Data](f: Int => T) = (0 until StoreBufferSize) map f

  // sbuffer entry count

  val plru = new ValidPseudoLRU(StoreBufferSize)
  val accessIdx = Wire(Vec(EnsbufferWidth + 1, Valid(UInt(SbufferIndexWidth.W))))
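  // accessIdx ports 0 until EnsbufferWidth record enqueue touches (allocate / merge);
  // the extra last port touches the line picked for replacement, so that the PLRU
  // moves on and proposes a different victim afterwards.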

  val candidateVec = VecInit(stateVec.map(s => s.isDcacheReqCandidate()))

  val replaceAlgoIdx = plru.way(candidateVec.reverse)._2
  val replaceAlgoNotDcacheCandidate = !stateVec(replaceAlgoIdx).isDcacheReqCandidate()

  assert(!(candidateVec.asUInt.orR && replaceAlgoNotDcacheCandidate), "we have way to select, but replace algo selects invalid way")

  val replaceIdx = replaceAlgoIdx
  plru.access(accessIdx)

  //-------------------------cohCount-----------------------------
  // insert and merge: cohCount=0
  // every cycle cohCount+=1
  // if cohCount(EvictCountBits-1)==1, evict
  val cohTimeOutMask = VecInit(widthMap(i => cohCount(i)(EvictCountBits - 1) && stateVec(i).isActive()))
  val (cohTimeOutIdx, cohHasTimeOut) = PriorityEncoderWithFlag(cohTimeOutMask)
  val cohTimeOutOH = PriorityEncoderOH(cohTimeOutMask)
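  // A replay resp from the dcache miss queue puts an entry into w_timeout; the entry
  // only becomes eligible for resend once missqReplayCount's MSB is set, i.e. after
  // SbufferReplayDelayCycles cycles. The RegNext pair below delays the timeout by a
  // cycle (likely for timing) and suppresses it if an eviction fired last cycle.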
  val missqReplayTimeOutMask = VecInit(widthMap(i => missqReplayCount(i)(MissqReplayCountBits - 1) && stateVec(i).w_timeout))
  val (missqReplayTimeOutIdxGen, missqReplayHasTimeOutGen) = PriorityEncoderWithFlag(missqReplayTimeOutMask)
  val missqReplayHasTimeOut = RegNext(missqReplayHasTimeOutGen) && !RegNext(sbuffer_out_s0_fire)
  val missqReplayTimeOutIdx = RegEnable(missqReplayTimeOutIdxGen, missqReplayHasTimeOutGen)

  //-------------------------sbuffer enqueue-----------------------------

  // Now sbuffer enq logic is divided into 3 stages:

  // sbuffer_in_s0:
  // * read data and meta from store queue
  // * store them in a 2-entry fifo queue

  // sbuffer_in_s1:
  // * read data and meta from fifo queue
  // * update sbuffer meta (vtag, ptag, flag)
  // * prevent that line from being sent to dcache (add a block condition)
  // * prepare cacheline level write enable signal, RegNext() data and mask

  // sbuffer_in_s2:
  // * use cacheline level buffer to update sbuffer data and mask
  // * remove dcache write block (if any)

  val activeMask = VecInit(stateVec.map(s => s.isActive()))
  val validMask  = VecInit(stateVec.map(s => s.isValid()))
  val drainIdx = PriorityEncoder(activeMask)

  val inflightMask = VecInit(stateVec.map(s => s.isInflight()))

  val inptags = io.in.map(in => getPTag(in.bits.addr))
  val invtags = io.in.map(in => getVTag(in.bits.vaddr))
  val sameTag = inptags(0) === inptags(1)
  val firstWord = getVWord(io.in(0).bits.addr)
  val secondWord = getVWord(io.in(1).bits.addr)
  // merge condition
  val mergeMask = Wire(Vec(EnsbufferWidth, Vec(StoreBufferSize, Bool())))
  val mergeIdx = mergeMask.map(PriorityEncoder(_)) // avoid using mergeIdx for better timing
  val canMerge = mergeMask.map(ParallelOR(_))
  val mergeVec = mergeMask.map(_.asUInt)

  for(i <- 0 until EnsbufferWidth){
    mergeMask(i) := widthMap(j =>
      inptags(i) === ptag(j) && activeMask(j)
    )
    assert(!(PopCount(mergeMask(i).asUInt) > 1.U && io.in(i).fire && io.in(i).bits.vecValid))
  }

  // insert condition
  // firstInsert: the first invalid entry
  // if the first entry can merge, or the second entry has the same ptag as the first,
  // secondInsert equals the first invalid entry; otherwise, it is the second invalid entry
  val invalidMask = VecInit(stateVec.map(s => s.isInvalid()))
  val evenInvalidMask = GetEvenBits(invalidMask.asUInt)
  val oddInvalidMask = GetOddBits(invalidMask.asUInt)

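  // Entries are split into even/odd halves so that each enqueue port only needs a
  // priority encoder over half of the buffer; enbufferSelReg alternates which half
  // port 0 allocates from, balancing utilization between the two halves.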
  def getFirstOneOH(input: UInt): UInt = {
    assert(input.getWidth > 1)
    val output = WireInit(VecInit(input.asBools))
    (1 until input.getWidth).map(i => {
      output(i) := !input(i - 1, 0).orR && input(i)
    })
    output.asUInt
  }
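  // getFirstOneOH one-hot-encodes the lowest set bit of its input,
  // e.g. getFirstOneOH("b0110".U) === "b0010".U.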

  val evenRawInsertVec = getFirstOneOH(evenInvalidMask)
  val oddRawInsertVec = getFirstOneOH(oddInvalidMask)
  val (evenRawInsertIdx, evenCanInsert) = PriorityEncoderWithFlag(evenInvalidMask)
  val (oddRawInsertIdx, oddCanInsert) = PriorityEncoderWithFlag(oddInvalidMask)
  val evenInsertIdx = Cat(evenRawInsertIdx, 0.U(1.W)) // slow to generate, for debug only
  val oddInsertIdx = Cat(oddRawInsertIdx, 1.U(1.W)) // slow to generate, for debug only
  val evenInsertVec = GetEvenBits.reverse(evenRawInsertVec)
  val oddInsertVec = GetOddBits.reverse(oddRawInsertVec)

  val enbufferSelReg = RegInit(false.B)
  when(io.in(0).valid) {
    enbufferSelReg := ~enbufferSelReg
  }

  val firstInsertIdx = Mux(enbufferSelReg, evenInsertIdx, oddInsertIdx) // slow to generate, for debug only
  val secondInsertIdx = Mux(sameTag,
    firstInsertIdx,
    Mux(~enbufferSelReg, evenInsertIdx, oddInsertIdx)
  ) // slow to generate, for debug only
  val firstInsertVec = Mux(enbufferSelReg, evenInsertVec, oddInsertVec)
  val secondInsertVec = Mux(sameTag,
    firstInsertVec,
    Mux(~enbufferSelReg, evenInsertVec, oddInsertVec)
  ) // slow to generate, for debug only
  val firstCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(enbufferSelReg, evenCanInsert, oddCanInsert)
  val secondCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(sameTag,
    firstCanInsert,
    Mux(~enbufferSelReg, evenCanInsert, oddCanInsert)
  ) && (EnsbufferWidth >= 1).B
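  // A vtag/ptag inconsistency detected by load forwarding or by a merge hit forces a
  // sbuffer-only drain so a consistent vtag/ptag mapping can be re-established. The
  // extra RegNext on the merge path presumably lines the drain request up with the
  // 2-cycle enqueue pipeline before it is acted upon.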
  val forward_need_uarch_drain = WireInit(false.B)
  val merge_need_uarch_drain = WireInit(false.B)
  val do_uarch_drain = RegNext(forward_need_uarch_drain) || RegNext(RegNext(merge_need_uarch_drain))
  XSPerfAccumulate("do_uarch_drain", do_uarch_drain)

  io.in(0).ready := firstCanInsert
  io.in(1).ready := secondCanInsert && io.in(0).ready

  for (i <- 0 until EnsbufferWidth) {
    // train
    if (EnableStorePrefetchSPB) {
      prefetcher.io.sbuffer_enq(i).valid := io.in(i).fire && io.in(i).bits.vecValid
      prefetcher.io.sbuffer_enq(i).bits := DontCare
      prefetcher.io.sbuffer_enq(i).bits.vaddr := io.in(i).bits.vaddr
    } else {
      prefetcher.io.sbuffer_enq(i).valid := false.B
      prefetcher.io.sbuffer_enq(i).bits := DontCare
    }

    // prefetch req
    if (EnableStorePrefetchAtCommit) {
      if (EnableAtCommitMissTrigger) {
        io.store_prefetch(i).valid := prefetcher.io.prefetch_req(i).valid || (io.in(i).fire && io.in(i).bits.vecValid && io.in(i).bits.prefetch)
      } else {
        io.store_prefetch(i).valid := prefetcher.io.prefetch_req(i).valid || (io.in(i).fire && io.in(i).bits.vecValid)
      }
      io.store_prefetch(i).bits.paddr := DontCare
      io.store_prefetch(i).bits.vaddr := Mux(prefetcher.io.prefetch_req(i).valid, prefetcher.io.prefetch_req(i).bits.vaddr, io.in(i).bits.vaddr)
      prefetcher.io.prefetch_req(i).ready := io.store_prefetch(i).ready
    } else {
      io.store_prefetch(i) <> prefetcher.io.prefetch_req(i)
    }
    io.store_prefetch zip prefetcher.io.prefetch_req drop 2 foreach (x => x._1 <> x._2)
  }
  prefetcher.io.memSetPattenDetected := io.memSetPattenDetected

  def wordReqToBufLine( // allocate a new line in sbuffer
    req: DCacheWordReq,
    reqptag: UInt,
    reqvtag: UInt,
    insertIdx: UInt,
    insertVec: UInt,
    wordOffset: UInt
  ): Unit = {
    assert(UIntToOH(insertIdx) === insertVec)
    val sameBlockInflightMask = genSameBlockInflightMask(reqptag)
    (0 until StoreBufferSize).map(entryIdx => {
      when(insertVec(entryIdx)){
        stateVec(entryIdx).state_valid := true.B
        stateVec(entryIdx).w_sameblock_inflight := sameBlockInflightMask.orR // set w_sameblock_inflight when a line is first allocated
        when(sameBlockInflightMask.orR){
          waitInflightMask(entryIdx) := sameBlockInflightMask
        }
        cohCount(entryIdx) := 0.U
        // missqReplayCount(insertIdx) := 0.U
        ptag(entryIdx) := reqptag
        vtag(entryIdx) := reqvtag // update vtag if a new sbuffer line is allocated
      }
    })
  }

  def mergeWordReq( // merge write req into an existing line
    req: DCacheWordReq,
    reqptag: UInt,
    reqvtag: UInt,
    mergeIdx: UInt,
    mergeVec: UInt,
    wordOffset: UInt
  ): Unit = {
    assert(UIntToOH(mergeIdx) === mergeVec)
    (0 until StoreBufferSize).map(entryIdx => {
      when(mergeVec(entryIdx)) {
        cohCount(entryIdx) := 0.U
        // missqReplayCount(entryIdx) := 0.U
        // check if vtag is the same, if not, trigger sbuffer flush
        when(reqvtag =/= vtag(entryIdx)) {
          XSDebug("reqvtag =/= sbufvtag req(vtag %x ptag %x) sbuffer(vtag %x ptag %x)\n",
            reqvtag << OffsetWidth,
            reqptag << OffsetWidth,
            vtag(entryIdx) << OffsetWidth,
            ptag(entryIdx) << OffsetWidth
          )
          merge_need_uarch_drain := true.B
        }
      }
    })
  }

  for(((in, vwordOffset), i) <- io.in.zip(Seq(firstWord, secondWord)).zipWithIndex){
    writeReq(i).valid := in.fire && in.bits.vecValid
    writeReq(i).bits.vwordOffset := vwordOffset
    writeReq(i).bits.mask := in.bits.mask
    writeReq(i).bits.data := in.bits.data
    writeReq(i).bits.wline := in.bits.wline
    val debug_insertIdx = if(i == 0) firstInsertIdx else secondInsertIdx
    val insertVec = if(i == 0) firstInsertVec else secondInsertVec
    assert(!((PopCount(insertVec) > 1.U) && in.fire && in.bits.vecValid))
    val insertIdx = OHToUInt(insertVec)
    accessIdx(i).valid := RegNext(in.fire && in.bits.vecValid)
    accessIdx(i).bits := RegNext(Mux(canMerge(i), mergeIdx(i), insertIdx))
    when(in.fire && in.bits.vecValid){
      when(canMerge(i)){
        writeReq(i).bits.wvec := mergeVec(i)
        mergeWordReq(in.bits, inptags(i), invtags(i), mergeIdx(i), mergeVec(i), vwordOffset)
        XSDebug(p"merge req $i to line [${mergeIdx(i)}]\n")
      }.otherwise({
        writeReq(i).bits.wvec := insertVec
        wordReqToBufLine(in.bits, inptags(i), invtags(i), insertIdx, insertVec, vwordOffset)
        XSDebug(p"insert req $i to line[$insertIdx]\n")
        assert(debug_insertIdx === insertIdx)
      })
    }
  }


  for(i <- 0 until StoreBufferSize){
    XSDebug(stateVec(i).isValid(),
      p"[$i] timeout:${cohCount(i)(EvictCountBits-1)} state:${stateVec(i)}\n"
    )
  }

  for((req, i) <- io.in.zipWithIndex){
    XSDebug(req.fire && req.bits.vecValid,
      p"accept req [$i]: " +
        p"addr:${Hexadecimal(req.bits.addr)} " +
        p"mask:${Binary(shiftMaskToLow(req.bits.addr,req.bits.mask))} " +
        p"data:${Hexadecimal(shiftDataToLow(req.bits.addr,req.bits.data))}\n"
    )
    XSDebug(req.valid && !req.ready,
      p"req [$i] blocked by sbuffer\n"
    )
  }

  // for now, when enq, trigger a prefetch (if EnableAtCommitMissTrigger)
  require(EnsbufferWidth <= StorePipelineWidth)

  // ---------------------- Send Dcache Req ---------------------

  val sbuffer_empty = Cat(invalidMask).andR
  val sq_empty = !Cat(io.in.map(_.valid)).orR
  val empty = sbuffer_empty && sq_empty
  val threshold = Wire(UInt(5.W)) // RegNext(io.csrCtrl.sbuffer_threshold +& 1.U)
  threshold := Constantin.createRecord("StoreBufferThreshold_"+p(XSCoreParamsKey).HartId.toString(), initValue = 7.U)
  val base = Wire(UInt(5.W))
  base := Constantin.createRecord("StoreBufferBase_"+p(XSCoreParamsKey).HartId.toString(), initValue = 4.U)
  val ActiveCount = PopCount(activeMask)
  val ValidCount = PopCount(validMask)
  val forceThreshold = Mux(io.force_write, threshold - base, threshold)
  val do_eviction = RegNext(ActiveCount >= forceThreshold || ActiveCount === (StoreBufferSize-1).U || ValidCount === (StoreBufferSize).U, init = false.B)
  require((StoreBufferThreshold + 1) <= StoreBufferSize)
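  // Eviction kicks in once the number of active lines reaches the Constantin-tunable
  // threshold, or the buffer is (almost) full; asserting force_write lowers the
  // threshold by `base` so the buffer drains more aggressively.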

  XSDebug(p"ActiveCount[$ActiveCount]\n")

  io.flush.empty := RegNext(empty && io.sqempty)
  // lru.io.flush := sbuffer_state === x_drain_all && empty
  switch(sbuffer_state){
    is(x_idle){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(do_uarch_drain){
        sbuffer_state := x_drain_sbuffer
      }.elsewhen(do_eviction){
        sbuffer_state := x_replace
      }
    }
    is(x_drain_all){
      when(empty){
        sbuffer_state := x_idle
      }
    }
    is(x_drain_sbuffer){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(sbuffer_empty){
        sbuffer_state := x_idle
      }
    }
    is(x_replace){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(do_uarch_drain){
        sbuffer_state := x_drain_sbuffer
      }.elsewhen(!do_eviction){
        sbuffer_state := x_idle
      }
    }
  }
  XSDebug(p"sbuffer state:${sbuffer_state} do eviction:${do_eviction} empty:${empty}\n")

  def noSameBlockInflight(idx: UInt): Bool = {
    // stateVec(idx) itself must not be s_inflight
    !Cat(widthMap(i => inflightMask(i) && ptag(idx) === ptag(i))).orR
  }

  def genSameBlockInflightMask(ptag_in: UInt): UInt = {
    val mask = VecInit(widthMap(i => inflightMask(i) && ptag_in === ptag(i))).asUInt // quite slow, use it with care
    assert(!(PopCount(mask) > 1.U))
    mask
  }

  def haveSameBlockInflight(ptag_in: UInt): Bool = {
    genSameBlockInflightMask(ptag_in).orR
  }

  // ---------------------------------------------------------------------------
  // sbuffer to dcache pipeline
  // ---------------------------------------------------------------------------

  // Now sbuffer deq logic is divided into 2 stages:

  // sbuffer_out_s0:
  // * read data and meta from sbuffer
  // * RegNext() them
  // * set line state to inflight

  // sbuffer_out_s1:
  // * send write req to dcache

  // sbuffer_out_extra:
  // * receive write result from dcache
  // * update line state

  val sbuffer_out_s1_ready = Wire(Bool())

  // ---------------------------------------------------------------------------
  // sbuffer_out_s0
  // ---------------------------------------------------------------------------

  val need_drain = needDrain(sbuffer_state)
  val need_replace = do_eviction || (sbuffer_state === x_replace)
  val sbuffer_out_s0_evictionIdx = Mux(missqReplayHasTimeOut,
    missqReplayTimeOutIdx,
    Mux(need_drain,
      drainIdx,
      Mux(cohHasTimeOut, cohTimeOutIdx, replaceIdx)
    )
  )

  // If there is an inflight dcache req whose ptag matches sbuffer_out_s0_evictionIdx's ptag,
  // the current eviction should be blocked.
  val sbuffer_out_s0_valid = missqReplayHasTimeOut ||
    stateVec(sbuffer_out_s0_evictionIdx).isDcacheReqCandidate() &&
    (need_drain || cohHasTimeOut || need_replace)
  assert(!(
    stateVec(sbuffer_out_s0_evictionIdx).isDcacheReqCandidate &&
    !noSameBlockInflight(sbuffer_out_s0_evictionIdx)
  ))
  val sbuffer_out_s0_cango = sbuffer_out_s1_ready
  sbuffer_out_s0_fire := sbuffer_out_s0_valid && sbuffer_out_s0_cango

  // ---------------------------------------------------------------------------
  // sbuffer_out_s1
  // ---------------------------------------------------------------------------

  // TODO: use EnsbufferWidth
  val shouldWaitWriteFinish = RegNext(VecInit((0 until EnsbufferWidth).map{i =>
    (writeReq(i).bits.wvec.asUInt & UIntToOH(sbuffer_out_s0_evictionIdx).asUInt).orR &&
    writeReq(i).valid
  }).asUInt.orR)
  // block dcache write if read / write hazard
  val blockDcacheWrite = shouldWaitWriteFinish
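  // The data/mask arrays are written one cycle after writeReq fires (2-cycle update
  // in SbufferData), so a line read out for eviction may not yet contain the bytes
  // of an in-flight write to it; hold the dcache request until that write lands.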

  val sbuffer_out_s1_valid = RegInit(false.B)
  sbuffer_out_s1_ready := io.dcache.req.ready && !blockDcacheWrite || !sbuffer_out_s1_valid
  val sbuffer_out_s1_fire = io.dcache.req.fire

  // when sbuffer_out_s1_fire, send dcache req stored in pipeline reg to dcache
  when(sbuffer_out_s1_fire){
    sbuffer_out_s1_valid := false.B
  }
  // when sbuffer_out_s0_fire, read dcache req data and store them in a pipeline reg
  when(sbuffer_out_s0_cango){
    sbuffer_out_s1_valid := sbuffer_out_s0_valid
  }
  when(sbuffer_out_s0_fire){
    stateVec(sbuffer_out_s0_evictionIdx).state_inflight := true.B
    stateVec(sbuffer_out_s0_evictionIdx).w_timeout := false.B
    // stateVec(sbuffer_out_s0_evictionIdx).s_pipe_req := true.B
    XSDebug(p"$sbuffer_out_s0_evictionIdx will be sent to Dcache\n")
  }

  XSDebug(p"need drain:$need_drain cohHasTimeOut: $cohHasTimeOut need replace:$need_replace\n")
  XSDebug(p"drainIdx:$drainIdx tIdx:$cohTimeOutIdx replIdx:$replaceIdx " +
    p"blocked:${!noSameBlockInflight(sbuffer_out_s0_evictionIdx)} v:${activeMask(sbuffer_out_s0_evictionIdx)}\n")
  XSDebug(p"sbuffer_out_s0_valid:$sbuffer_out_s0_valid evictIdx:$sbuffer_out_s0_evictionIdx dcache ready:${io.dcache.req.ready}\n")
  // Note: if other dcache reqs in the same block are inflight,
  // the lru update may not be accurate
  accessIdx(EnsbufferWidth).valid := invalidMask(replaceIdx) || (
    need_replace && !need_drain && !cohHasTimeOut && !missqReplayHasTimeOut && sbuffer_out_s0_cango && activeMask(replaceIdx))
  accessIdx(EnsbufferWidth).bits := replaceIdx
  val sbuffer_out_s1_evictionIdx = RegEnable(sbuffer_out_s0_evictionIdx, sbuffer_out_s0_fire)
  val sbuffer_out_s1_evictionPTag = RegEnable(ptag(sbuffer_out_s0_evictionIdx), sbuffer_out_s0_fire)
  val sbuffer_out_s1_evictionVTag = RegEnable(vtag(sbuffer_out_s0_evictionIdx), sbuffer_out_s0_fire)

  io.dcache.req.valid := sbuffer_out_s1_valid && !blockDcacheWrite
  io.dcache.req.bits := DontCare
  io.dcache.req.bits.cmd   := MemoryOpConstants.M_XWR
  io.dcache.req.bits.addr  := getAddr(sbuffer_out_s1_evictionPTag)
  io.dcache.req.bits.vaddr := getAddr(sbuffer_out_s1_evictionVTag)
  io.dcache.req.bits.data  := data(sbuffer_out_s1_evictionIdx).asUInt
  io.dcache.req.bits.mask  := mask(sbuffer_out_s1_evictionIdx).asUInt
  io.dcache.req.bits.id := sbuffer_out_s1_evictionIdx

  when (sbuffer_out_s1_fire) {
    assert(!(io.dcache.req.bits.vaddr === 0.U))
    assert(!(io.dcache.req.bits.addr === 0.U))
  }

  XSDebug(sbuffer_out_s1_fire,
    p"send buf [$sbuffer_out_s1_evictionIdx] to Dcache, req fire\n"
  )

  // update sbuffer status according to dcache resp source

  def id_to_sbuffer_id(id: UInt): UInt = {
    require(id.getWidth >= log2Up(StoreBufferSize))
    id(log2Up(StoreBufferSize)-1, 0)
  }

  // hit resp
  io.dcache.hit_resps.map(resp => {
    val dcache_resp_id = resp.bits.id
    when (resp.fire) {
      stateVec(dcache_resp_id).state_inflight := false.B
      stateVec(dcache_resp_id).state_valid := false.B
      assert(!resp.bits.replay)
      assert(!resp.bits.miss) // no need to resp if miss (to be optimized)
      assert(stateVec(dcache_resp_id).state_inflight === true.B)
    }

    // Updating the w_sameblock_inflight flag is delayed by 1 cycle
    //
    // When a new req allocates a new line in the sbuffer, the sameblock_inflight check
    // ignores the current dcache.hit_resps. Then, in the next cycle, we have plenty of
    // time to check if the same block is still inflight
    (0 until StoreBufferSize).map(i => {
      when(
        stateVec(i).w_sameblock_inflight &&
        stateVec(i).state_valid &&
        RegNext(resp.fire) &&
        waitInflightMask(i) === UIntToOH(RegNext(id_to_sbuffer_id(dcache_resp_id)))
      ){
        stateVec(i).w_sameblock_inflight := false.B
      }
    })
  })

  io.dcache.hit_resps.zip(dataModule.io.maskFlushReq).map{case (resp, maskFlush) => {
    maskFlush.valid := resp.fire
    maskFlush.bits.wvec := UIntToOH(resp.bits.id)
  }}
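  // On a successful writeback the entry's mask is cleared (via the 2-cycle flush in
  // SbufferData) so that a later reallocation of the entry starts from an empty mask;
  // the stale data itself is left in place and simply overwritten on the next write.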

  // replay resp
  val replay_resp_id = io.dcache.replay_resp.bits.id
  when (io.dcache.replay_resp.fire) {
    missqReplayCount(replay_resp_id) := 0.U
    stateVec(replay_resp_id).w_timeout := true.B
    // waiting for timeout
    assert(io.dcache.replay_resp.bits.replay)
    assert(stateVec(replay_resp_id).state_inflight === true.B)
  }

  // TODO: reuse cohCount
  (0 until StoreBufferSize).map(i => {
    when(stateVec(i).w_timeout && stateVec(i).state_inflight && !missqReplayCount(i)(MissqReplayCountBits-1)) {
      missqReplayCount(i) := missqReplayCount(i) + 1.U
    }
    when(activeMask(i) && !cohTimeOutMask(i)){
      cohCount(i) := cohCount(i)+1.U
    }
  })

  if (env.EnableDifftest) {
    // hit resp
    io.dcache.hit_resps.zipWithIndex.map{case (resp, index) => {
      val difftest = DifftestModule(new DiffSbufferEvent, delay = 1)
      val dcache_resp_id = resp.bits.id
      difftest.coreid := io.hartId
      difftest.index  := index.U
      difftest.valid  := resp.fire
      difftest.addr   := getAddr(ptag(dcache_resp_id))
      difftest.data   := data(dcache_resp_id).asTypeOf(Vec(CacheLineBytes, UInt(8.W)))
      difftest.mask   := mask(dcache_resp_id).asUInt
    }}
  }

  // ---------------------- Load Data Forward ---------------------
  val mismatch = Wire(Vec(LoadPipelineWidth, Bool()))
  XSPerfAccumulate("vaddr_match_failed", mismatch(0) || mismatch(1))
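  // Forwarding matches on vtag because paddr (from the DTLB) arrives too late for the
  // CAM; a one-cycle-delayed vtag-vs-ptag cross check catches virtual-alias mismatches,
  // reports matchInvalid and requests a sbuffer drain to recover.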
  for ((forward, i) <- io.forward.zipWithIndex) {
    val vtag_matches = VecInit(widthMap(w => vtag(w) === getVTag(forward.vaddr)))
    // ptag_matches uses paddr from dtlb, which is far from sbuffer
    val ptag_matches = VecInit(widthMap(w => RegEnable(ptag(w), forward.valid) === RegEnable(getPTag(forward.paddr), forward.valid)))
    val tag_matches = vtag_matches
    val tag_mismatch = RegNext(forward.valid) && VecInit(widthMap(w =>
      RegNext(vtag_matches(w)) =/= ptag_matches(w) && RegNext((activeMask(w) || inflightMask(w)))
    )).asUInt.orR
    mismatch(i) := tag_mismatch
    when (tag_mismatch) {
      XSDebug("forward tag mismatch: pmatch %x vmatch %x vaddr %x paddr %x\n",
        RegNext(ptag_matches.asUInt),
        RegNext(vtag_matches.asUInt),
        RegNext(forward.vaddr),
        RegNext(forward.paddr)
      )
      forward_need_uarch_drain := true.B
    }
    val valid_tag_matches = widthMap(w => tag_matches(w) && activeMask(w))
    val inflight_tag_matches = widthMap(w => tag_matches(w) && inflightMask(w))
    val line_offset_mask = UIntToOH(getVWordOffset(forward.paddr))

    val valid_tag_match_reg = valid_tag_matches.map(RegNext(_))
    val inflight_tag_match_reg = inflight_tag_matches.map(RegNext(_))
    val line_offset_reg = RegNext(line_offset_mask)
    val forward_mask_candidate_reg = RegEnable(
      VecInit(mask.map(entry => entry(getVWordOffset(forward.paddr)))),
      forward.valid
    )
    val forward_data_candidate_reg = RegEnable(
      VecInit(data.map(entry => entry(getVWordOffset(forward.paddr)))),
      forward.valid
    )

    val selectedValidMask = Mux1H(valid_tag_match_reg, forward_mask_candidate_reg)
    val selectedValidData = Mux1H(valid_tag_match_reg, forward_data_candidate_reg)
    selectedValidMask.suggestName("selectedValidMask_"+i)
    selectedValidData.suggestName("selectedValidData_"+i)

    val selectedInflightMask = Mux1H(inflight_tag_match_reg, forward_mask_candidate_reg)
    val selectedInflightData = Mux1H(inflight_tag_match_reg, forward_data_candidate_reg)
    selectedInflightMask.suggestName("selectedInflightMask_"+i)
    selectedInflightData.suggestName("selectedInflightData_"+i)

    // currently not being used
    val selectedInflightMaskFast = Mux1H(line_offset_mask, Mux1H(inflight_tag_matches, mask).asTypeOf(Vec(CacheLineVWords, Vec(VDataBytes, Bool()))))
    val selectedValidMaskFast = Mux1H(line_offset_mask, Mux1H(valid_tag_matches, mask).asTypeOf(Vec(CacheLineVWords, Vec(VDataBytes, Bool()))))

    forward.dataInvalid := false.B // data in store line merge buffer is always ready
    forward.matchInvalid := tag_mismatch // paddr / vaddr cam result does not match
    for (j <- 0 until VDataBytes) {
      forward.forwardMask(j) := false.B
      forward.forwardData(j) := DontCare

      // valid entries have higher priority than inflight entries
      when(selectedInflightMask(j)) {
        forward.forwardMask(j) := true.B
        forward.forwardData(j) := selectedInflightData(j)
      }
      when(selectedValidMask(j)) {
        forward.forwardMask(j) := true.B
        forward.forwardData(j) := selectedValidData(j)
      }

      forward.forwardMaskFast(j) := selectedInflightMaskFast(j) || selectedValidMaskFast(j)
    }
    forward.addrInvalid := DontCare
  }

  for (i <- 0 until StoreBufferSize) {
    XSDebug("sbf entry " + i + " : ptag %x vtag %x valid %x active %x inflight %x w_timeout %x\n",
      ptag(i) << OffsetWidth,
      vtag(i) << OffsetWidth,
      stateVec(i).isValid(),
      activeMask(i),
      inflightMask(i),
      stateVec(i).w_timeout
    )
  }

  /*
  *
  **********************************************************
  *      -------------                   -------------     *
  *      | XiangShan |                   |    NEMU   |     *
  *      -------------                   -------------     *
  *            |                               |           *
  *            V                               V           *
  *          -----                           -----         *
  *          | Q |                           | Q |         *
  *          | U |                           | U |         *
  *          | E |                           | E |         *
  *          | U |                           | U |         *
  *          | E |                           | E |         *
  *          |   |                           |   |         *
  *          -----                           -----         *
  *            |                               |           *
  *            |        --------------         |           *
  *            |>>>>>>>>|  DIFFTEST  |<<<<<<<<<|           *
  *                     --------------                     *
  **********************************************************
  */
  if (env.EnableDifftest) {
    val VecMemFLOWMaxNumber = 16

    def UIntSlice(in: UInt, High: UInt, Low: UInt): UInt = {
      val maxNum = in.getWidth
      val result = Wire(Vec(maxNum, Bool()))

      for (i <- 0 until maxNum) {
        when (Low + i.U <= High) {
          result(i) := in(Low + i.U)
        }.otherwise{
          result(i) := 0.U
        }
      }

      result.asUInt
    }
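    // UIntSlice(in, high, low) extracts in(high, low) with runtime (UInt) bounds,
    // zero-filling everything above the slice; e.g. UIntSlice("b1101".U, 2.U, 1.U)
    // yields "b0010".U (bits 2..1 are "10", placed at the LSBs).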

    // To align with 'nemu', we need:
    //  For 'unit-store' and 'whole' vector store instrs, we re-split here,
    //  and for the rest, we do nothing.
    for (i <- 0 until EnsbufferWidth) {
      io.vecDifftestInfo(i).ready := io.in(i).ready

      val uop             = io.vecDifftestInfo(i).bits

      val isVse           = isVStore(uop.fuType) && LSUOpType.isUStride(uop.fuOpType)
      val isVsm           = isVStore(uop.fuType) && VstuType.isMasked(uop.fuOpType)
      val isVsr           = isVStore(uop.fuType) && VstuType.isWhole(uop.fuOpType)

      val vpu             = uop.vpu
      val veew            = uop.vpu.veew
      val eew             = EewLog2(veew)
      val EEB             = (1.U << eew).asUInt // effective element bytes (only correct when VLEN = 128)
      val EEWBits         = (EEB << 3.U).asUInt
      val nf              = Mux(isVsr, 0.U, vpu.nf)

      val isSegment       = nf =/= 0.U && !isVsm
      val isVSLine        = (isVse || isVsm || isVsr) && !isSegment

      // The number of stores a uop generates in theory.
      // No other vector instructions need to be considered.
      val flow            = Mux(
                              isVSLine,
                              (16.U >> eew).asUInt,
                              0.U
                            )

      val rawData         = io.in(i).bits.data
      val rawMask         = io.in(i).bits.mask
      val rawAddr         = io.in(i).bits.addr

      // A common difftest interface for scalar and vector instrs
      val difftestCommon = DifftestModule(new DiffStoreEvent, delay = 2)
      when (isVSLine) {
        val splitMask         = UIntSlice(rawMask, EEB - 1.U, 0.U)(7,0)  // Byte
        val splitData         = UIntSlice(rawData, EEWBits - 1.U, 0.U)(63,0) // Double word
        val storeCommit       = io.in(i).fire && splitMask.orR && io.in(i).bits.vecValid
        val waddr             = rawAddr
        val wmask             = splitMask
        val wdata             = splitData & MaskExpand(splitMask)

        difftestCommon.coreid := io.hartId
        difftestCommon.index  := (i*VecMemFLOWMaxNumber).U
        difftestCommon.valid  := storeCommit
        difftestCommon.addr   := waddr
        difftestCommon.data   := wdata
        difftestCommon.mask   := wmask

      }.otherwise{
        val storeCommit       = io.in(i).fire
        val waddr             = ZeroExt(Cat(io.in(i).bits.addr(PAddrBits - 1, 3), 0.U(3.W)), 64)
        val sbufferMask       = shiftMaskToLow(io.in(i).bits.addr, io.in(i).bits.mask)
        val sbufferData       = shiftDataToLow(io.in(i).bits.addr, io.in(i).bits.data)
        val wmask             = sbufferMask
        val wdata             = sbufferData & MaskExpand(sbufferMask)

        difftestCommon.coreid := io.hartId
        difftestCommon.index  := (i*VecMemFLOWMaxNumber).U
        difftestCommon.valid  := storeCommit && io.in(i).bits.vecValid
        difftestCommon.addr   := waddr
        difftestCommon.data   := wdata
        difftestCommon.mask   := wmask

      }

      // Interfaces used only by 'unit-store' and 'whole' vector store instrs
      for (index <- 1 until VecMemFLOWMaxNumber) {
        val difftest = DifftestModule(new DiffStoreEvent, delay = 2)

        // The 'mask' has already been processed outside:
        //  Different cases of 'vm' have been considered:
        //    Any valid store will definitely not have an all-zero mask,
        //    and the extra part due to unaligned access must have a mask of 0
        when (index.U < flow && isVSLine) {
          // Make NEMU-difftest happy
          val shiftIndex  = EEB*index.U
          val shiftFlag   = shiftIndex(2,0).orR // Double word Flag
          val shiftBytes  = Mux(shiftFlag, shiftIndex(2,0), 0.U)
          val shiftBits   = shiftBytes << 3.U
          val splitMask   = UIntSlice(rawMask, (EEB*(index+1).U - 1.U), EEB*index.U)(7,0)  // Byte
          val splitData   = UIntSlice(rawData, (EEWBits*(index+1).U - 1.U), EEWBits*index.U)(63,0) // Double word
          val storeCommit = io.in(i).fire && splitMask.orR  && io.in(i).bits.vecValid
          val waddr       = Cat(rawAddr(PAddrBits - 1, 4), Cat(shiftIndex(3), 0.U(3.W)))
          val wmask       = splitMask << shiftBytes
          val wdata       = (splitData & MaskExpand(splitMask)) << shiftBits

          difftest.coreid := io.hartId
          difftest.index  := (i*VecMemFLOWMaxNumber+index).U
          difftest.valid  := storeCommit
          difftest.addr   := waddr
          difftest.data   := wdata
          difftest.mask   := wmask

        }.otherwise{
          difftest.coreid := 0.U
          difftest.index  := 0.U
          difftest.valid  := 0.U
          difftest.addr   := 0.U
          difftest.data   := 0.U
          difftest.mask   := 0.U

        }
      }
    }
  }

  val perf_valid_entry_count = RegNext(PopCount(VecInit(stateVec.map(s => !s.isInvalid())).asUInt))
  XSPerfHistogram("util", perf_valid_entry_count, true.B, 0, StoreBufferSize, 1)
  XSPerfAccumulate("sbuffer_req_valid", PopCount(VecInit(io.in.map(_.valid)).asUInt))
  XSPerfAccumulate("sbuffer_req_fire", PopCount(VecInit(io.in.map(_.fire)).asUInt))
  XSPerfAccumulate("sbuffer_req_fire_vecinvalid", PopCount(VecInit(io.in.map(data => data.fire && !data.bits.vecValid)).asUInt))
  XSPerfAccumulate("sbuffer_merge", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && canMerge(i)})).asUInt))
  XSPerfAccumulate("sbuffer_newline", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && !canMerge(i)})).asUInt))
  XSPerfAccumulate("dcache_req_valid", io.dcache.req.valid)
  XSPerfAccumulate("dcache_req_fire", io.dcache.req.fire)
  XSPerfAccumulate("sbuffer_idle", sbuffer_state === x_idle)
  XSPerfAccumulate("sbuffer_flush", sbuffer_state === x_drain_sbuffer)
  XSPerfAccumulate("sbuffer_replace", sbuffer_state === x_replace)
  XSPerfAccumulate("evenCanInsert", evenCanInsert)
  XSPerfAccumulate("oddCanInsert", oddCanInsert)
  XSPerfAccumulate("mainpipe_resp_valid", io.dcache.main_pipe_hit_resp.fire)
  XSPerfAccumulate("refill_resp_valid", io.dcache.refill_hit_resp.fire)
  XSPerfAccumulate("replay_resp_valid", io.dcache.replay_resp.fire)
  XSPerfAccumulate("coh_timeout", cohHasTimeOut)

  // val (store_latency_sample, store_latency) = TransactionLatencyCounter(io.lsu.req.fire, io.lsu.resp.fire)
  // XSPerfHistogram("store_latency", store_latency, store_latency_sample, 0, 100, 10)
  // XSPerfAccumulate("store_req", io.lsu.req.fire)

  val perfEvents = Seq(
    ("sbuffer_req_valid ", PopCount(VecInit(io.in.map(_.valid)).asUInt)                                                                ),
    ("sbuffer_req_fire  ", PopCount(VecInit(io.in.map(_.fire)).asUInt)                                                               ),
    ("sbuffer_merge     ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && canMerge(i)})).asUInt)                ),
    ("sbuffer_newline   ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && !canMerge(i)})).asUInt)               ),
    ("dcache_req_valid  ", io.dcache.req.valid                                                                                         ),
    ("dcache_req_fire   ", io.dcache.req.fire                                                                                        ),
    ("sbuffer_idle      ", sbuffer_state === x_idle                                                                                    ),
    ("sbuffer_flush     ", sbuffer_state === x_drain_sbuffer                                                                           ),
    ("sbuffer_replace   ", sbuffer_state === x_replace                                                                                 ),
    ("mpipe_resp_valid  ", io.dcache.main_pipe_hit_resp.fire                                                                         ),
    ("refill_resp_valid ", io.dcache.refill_hit_resp.fire                                                                            ),
    ("replay_resp_valid ", io.dcache.replay_resp.fire                                                                                ),
    ("coh_timeout       ", cohHasTimeOut                                                                                               ),
    ("sbuffer_1_4_valid ", (perf_valid_entry_count < (StoreBufferSize.U/4.U))                                                          ),
    ("sbuffer_2_4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/4.U)) & (perf_valid_entry_count <= (StoreBufferSize.U/2.U))    ),
    ("sbuffer_3_4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/2.U)) & (perf_valid_entry_count <= (StoreBufferSize.U*3.U/4.U))),
    ("sbuffer_full_valid", (perf_valid_entry_count > (StoreBufferSize.U*3.U/4.U)))
  )
  generatePerfEvent()

}