/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import utility._
import xiangshan.cache._
import difftest._
import freechips.rocketchip.util._

class SbufferFlushBundle extends Bundle {
  val valid = Output(Bool())
  val empty = Input(Bool())
}

trait HasSbufferConst extends HasXSParameter {
  val EvictCycles = 1 << 20
  val SbufferReplayDelayCycles = 16
  require(isPow2(EvictCycles))
  val EvictCountBits = log2Up(EvictCycles+1)
  val MissqReplayCountBits = log2Up(SbufferReplayDelayCycles) + 1

  // dcache write hit resp used to have 2 sources:
  // refill pipe resp and main pipe resp (fixed: now only main pipe resp is used)
  // val NumDcacheWriteResp = 2 // hardcoded
  val NumDcacheWriteResp = 1 // hardcoded

  val SbufferIndexWidth: Int = log2Up(StoreBufferSize)
  // paddr = ptag + offset
  val CacheLineBytes: Int = CacheLineSize / 8
  val CacheLineWords: Int = CacheLineBytes / DataBytes
  val OffsetWidth: Int = log2Up(CacheLineBytes)
  val WordsWidth: Int = log2Up(CacheLineWords)
  val PTagWidth: Int = PAddrBits - OffsetWidth
  val VTagWidth: Int = VAddrBits - OffsetWidth
  val WordOffsetWidth: Int = PAddrBits - WordsWidth

  val CacheLineVWords: Int = CacheLineBytes / VDataBytes
  val VWordsWidth: Int = log2Up(CacheLineVWords)
  val VWordWidth: Int = log2Up(VDataBytes)
  val VWordOffsetWidth: Int = PAddrBits - VWordWidth
}
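
// Worked example (hypothetical config, not taken from this file): with
// CacheLineSize = 512, VLEN = 128 (VDataBytes = 16) and PAddrBits = 36,
//   CacheLineBytes  = 512 / 8    = 64
//   OffsetWidth     = log2Up(64) = 6
//   CacheLineVWords = 64 / 16    = 4
//   PTagWidth       = 36 - 6     = 30
// i.e. a paddr splits into a 30-bit ptag and a 6-bit in-line byte offset.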

class SbufferEntryState (implicit p: Parameters) extends SbufferBundle {
  val state_valid    = Bool() // this entry is active
  val state_inflight = Bool() // sbuffer is trying to write this entry to dcache
  val w_timeout = Bool() // got a replay resp from dcache; wait for the resend timeout before retrying
  val w_sameblock_inflight = Bool() // a dcache req for the same cache block is inflight

  def isInvalid(): Bool = !state_valid
  def isValid(): Bool = state_valid
  def isActive(): Bool = state_valid && !state_inflight
  def isInflight(): Bool = state_inflight
  def isDcacheReqCandidate(): Bool = state_valid && !state_inflight && !w_sameblock_inflight
}
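
// State summary (derived from the predicates above):
//   invalid  : !state_valid                   -- free entry
//   active   : state_valid && !state_inflight -- accepts merges; forwards to loads
//   inflight : state_valid && state_inflight  -- write req sent to dcache; still forwards to loads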

class SbufferBundle(implicit p: Parameters) extends XSBundle with HasSbufferConst

class DataWriteReq(implicit p: Parameters) extends SbufferBundle {
  // universal writemask
  val wvec = UInt(StoreBufferSize.W)
  // 2 cycle update
  val mask = UInt((VLEN/8).W)
  val data = UInt(VLEN.W)
  val vwordOffset = UInt(VWordOffsetWidth.W)
  val wline = Bool() // write full cacheline
}

class MaskFlushReq(implicit p: Parameters) extends SbufferBundle {
  // universal writemask
  val wvec = UInt(StoreBufferSize.W)
}

class SbufferData(implicit p: Parameters) extends XSModule with HasSbufferConst {
  val io = IO(new Bundle(){
    // update data and mask when alloc or merge
    val writeReq = Vec(EnsbufferWidth, Flipped(ValidIO(new DataWriteReq)))
    // clean mask when deq
    val maskFlushReq = Vec(NumDcacheWriteResp, Flipped(ValidIO(new MaskFlushReq)))
    val dataOut = Output(Vec(StoreBufferSize, Vec(CacheLineVWords, Vec(VDataBytes, UInt(8.W)))))
    val maskOut = Output(Vec(StoreBufferSize, Vec(CacheLineVWords, Vec(VDataBytes, Bool()))))
  })

  val data = Reg(Vec(StoreBufferSize, Vec(CacheLineVWords, Vec(VDataBytes, UInt(8.W)))))
  // val mask = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool()))))
  val mask = RegInit(
    VecInit(Seq.fill(StoreBufferSize)(
      VecInit(Seq.fill(CacheLineVWords)(
        VecInit(Seq.fill(VDataBytes)(false.B))
      ))
    ))
  )

  // 2 cycle line mask clean
  for(line <- 0 until StoreBufferSize){
    val line_mask_clean_flag = RegNext(
      io.maskFlushReq.map(a => a.valid && a.bits.wvec(line)).reduce(_ || _)
    )
    line_mask_clean_flag.suggestName("line_mask_clean_flag_"+line)
    when(line_mask_clean_flag){
      for(word <- 0 until CacheLineVWords){
        for(byte <- 0 until VDataBytes){
          mask(line)(word)(byte) := false.B
        }
      }
    }
  }
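
  // Timing note (illustrative): a maskFlushReq that fires in cycle T is
  // registered per line, and all of that line's mask bits are cleared in
  // cycle T+1, trading one cycle of latency for a shorter clean path.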

  // 2 cycle data / mask update
  for(i <- 0 until EnsbufferWidth) {
    val req = io.writeReq(i)
    for(line <- 0 until StoreBufferSize){
      val sbuffer_in_s1_line_wen = req.valid && req.bits.wvec(line)
      val sbuffer_in_s2_line_wen = RegNext(sbuffer_in_s1_line_wen)
      val line_write_buffer_data = RegEnable(req.bits.data, sbuffer_in_s1_line_wen)
      val line_write_buffer_wline = RegEnable(req.bits.wline, sbuffer_in_s1_line_wen)
      val line_write_buffer_mask = RegEnable(req.bits.mask, sbuffer_in_s1_line_wen)
      val line_write_buffer_offset = RegEnable(req.bits.vwordOffset(VWordsWidth-1, 0), sbuffer_in_s1_line_wen)
      sbuffer_in_s1_line_wen.suggestName("sbuffer_in_s1_line_wen_"+line)
      sbuffer_in_s2_line_wen.suggestName("sbuffer_in_s2_line_wen_"+line)
      line_write_buffer_data.suggestName("line_write_buffer_data_"+line)
      line_write_buffer_wline.suggestName("line_write_buffer_wline_"+line)
      line_write_buffer_mask.suggestName("line_write_buffer_mask_"+line)
      line_write_buffer_offset.suggestName("line_write_buffer_offset_"+line)
      for(word <- 0 until CacheLineVWords){
        for(byte <- 0 until VDataBytes){
          val write_byte = sbuffer_in_s2_line_wen && (
            line_write_buffer_mask(byte) && (line_write_buffer_offset === word.U) ||
            line_write_buffer_wline
          )
          when(write_byte){
            data(line)(word)(byte) := line_write_buffer_data(byte*8+7, byte*8)
            mask(line)(word)(byte) := true.B
          }
        }
      }
    }
  }
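
  // Write timing sketch (illustrative): a writeReq that fires in cycle T (s1)
  // latches data / mask / offset into per-line buffers; in cycle T+1 (s2) only
  // the bytes enabled by the latched mask (or the whole line when wline is
  // set) are written into data and mask. The per-line select (wvec) is thus
  // resolved one cycle before the wide byte-write fanout.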

  // 1 cycle line mask clean
  // for(i <- 0 until EnsbufferWidth) {
  //   val req = io.writeReq(i)
  //   when(req.valid){
  //     for(line <- 0 until StoreBufferSize){
  //       when(
  //         req.bits.wvec(line) &&
  //         req.bits.cleanMask
  //       ){
  //         for(word <- 0 until CacheLineWords){
  //           for(byte <- 0 until DataBytes){
  //             mask(line)(word)(byte) := false.B
  //             val debug_last_cycle_write_byte = RegNext(req.valid && req.bits.wvec(line) && (
  //               req.bits.mask(byte) && (req.bits.wordOffset(WordsWidth-1, 0) === word.U) ||
  //               req.bits.wline
  //             ))
  //             assert(!debug_last_cycle_write_byte)
  //           }
  //         }
  //       }
  //     }
  //   }
  // }

  io.dataOut := data
  io.maskOut := mask
}

class Sbuffer(implicit p: Parameters) extends DCacheModule with HasSbufferConst with HasPerfEvents {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(hartIdLen.W))
    val in = Vec(EnsbufferWidth, Flipped(Decoupled(new DCacheWordReqWithVaddrAndPfFlag)))  // Todo: store logic only supports Width == 2 for now
    val dcache = Flipped(new DCacheToSbufferIO)
    val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val sqempty = Input(Bool())
    val flush = Flipped(new SbufferFlushBundle)
    val csrCtrl = Flipped(new CustomCSRCtrlIO)
    val store_prefetch = Vec(StorePipelineWidth, DecoupledIO(new StorePrefetchReq)) // to dcache
    val memSetPattenDetected = Input(Bool())
    val force_write = Input(Bool())
  })

  val dataModule = Module(new SbufferData)
  dataModule.io.writeReq <> DontCare
  val prefetcher = Module(new StorePfWrapper())
  val writeReq = dataModule.io.writeReq

  val ptag = Reg(Vec(StoreBufferSize, UInt(PTagWidth.W)))
  val vtag = Reg(Vec(StoreBufferSize, UInt(VTagWidth.W)))
  val debug_mask = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool()))))
  val waitInflightMask = Reg(Vec(StoreBufferSize, UInt(StoreBufferSize.W)))
  val data = dataModule.io.dataOut
  val mask = dataModule.io.maskOut
  val stateVec = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U.asTypeOf(new SbufferEntryState))))
  val cohCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(EvictCountBits.W))))
  val missqReplayCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(MissqReplayCountBits.W))))

  val sbuffer_out_s0_fire = Wire(Bool())

  /*
       idle --[flush]    --> drain_all --[buf empty]   --> idle
       idle --[buf full] --> replace   --[dcache resp] --> idle
  */
  // x_drain_all: drain store queue and sbuffer
  // x_drain_sbuffer: drain sbuffer only, block store queue to sbuffer write
  val x_idle :: x_replace :: x_drain_all :: x_drain_sbuffer :: Nil = Enum(4)
  def needDrain(state: UInt): Bool =
    state(1) // both drain states (x_drain_all = 2, x_drain_sbuffer = 3) have bit 1 set
  val sbuffer_state = RegInit(x_idle)

  // ---------------------- Store Enq Sbuffer ---------------------

  def getPTag(pa: UInt): UInt =
    pa(PAddrBits - 1, PAddrBits - PTagWidth)

  def getVTag(va: UInt): UInt =
    va(VAddrBits - 1, VAddrBits - VTagWidth)

  def getWord(pa: UInt): UInt =
    pa(PAddrBits-1, 3)

  def getVWord(pa: UInt): UInt =
    pa(PAddrBits-1, 4)

  def getWordOffset(pa: UInt): UInt =
    pa(OffsetWidth-1, 3)

  def getVWordOffset(pa: UInt): UInt =
    pa(OffsetWidth-1, 4)

  def getAddr(ptag: UInt): UInt =
    Cat(ptag, 0.U((PAddrBits - PTagWidth).W))

  def getByteOffset(offset: UInt): UInt =
    Cat(offset(OffsetWidth - 1, 3), 0.U(3.W))

  def isOneOf(key: UInt, seq: Seq[UInt]): Bool =
    if(seq.isEmpty) false.B else Cat(seq.map(_===key)).orR

  def widthMap[T <: Data](f: Int => T) = (0 until StoreBufferSize) map f

  // sbuffer entry count

  val plru = new ValidPseudoLRU(StoreBufferSize)
  val accessIdx = Wire(Vec(EnsbufferWidth + 1, Valid(UInt(SbufferIndexWidth.W))))

  val candidateVec = VecInit(stateVec.map(s => s.isDcacheReqCandidate()))

  val replaceAlgoIdx = plru.way(candidateVec.reverse)._2
  val replaceAlgoNotDcacheCandidate = !stateVec(replaceAlgoIdx).isDcacheReqCandidate()

  assert(!(candidateVec.asUInt.orR && replaceAlgoNotDcacheCandidate), "we have a way to select, but the replace algo selected an invalid way")

  val replaceIdx = replaceAlgoIdx
  plru.access(accessIdx)

  //-------------------------cohCount-----------------------------
  // insert and merge: cohCount=0
  // every cycle cohCount+=1
  // if cohCount(EvictCountBits-1)==1, evict
  val cohTimeOutMask = VecInit(widthMap(i => cohCount(i)(EvictCountBits - 1) && stateVec(i).isActive()))
  val (cohTimeOutIdx, cohHasTimeOut) = PriorityEncoderWithFlag(cohTimeOutMask)
  val cohTimeOutOH = PriorityEncoderOH(cohTimeOutMask)
  val missqReplayTimeOutMask = VecInit(widthMap(i => missqReplayCount(i)(MissqReplayCountBits - 1) && stateVec(i).w_timeout))
  val (missqReplayTimeOutIdxGen, missqReplayHasTimeOutGen) = PriorityEncoderWithFlag(missqReplayTimeOutMask)
  val missqReplayHasTimeOut = RegNext(missqReplayHasTimeOutGen) && !RegNext(sbuffer_out_s0_fire)
  val missqReplayTimeOutIdx = RegEnable(missqReplayTimeOutIdxGen, missqReplayHasTimeOutGen)
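
  // Worked example: EvictCycles = 1 << 20 gives EvictCountBits = 21, so
  // cohCount(20) is first set after 2^20 = 1,048,576 cycles; an active line
  // that has not been written for ~1M cycles is then force-evicted to dcache.
  // Similarly, MissqReplayCountBits = 5 and missqReplayCount(4) is set after
  // SbufferReplayDelayCycles = 16 cycles, re-arming a replayed dcache req.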

  //-------------------------sbuffer enqueue-----------------------------

  // Now sbuffer enq logic is divided into 3 stages:

  // sbuffer_in_s0:
  // * read data and meta from store queue
  // * store them in a 2-entry fifo queue

  // sbuffer_in_s1:
  // * read data and meta from the fifo queue
  // * update sbuffer meta (vtag, ptag, flag)
  // * prevent that line from being sent to dcache (add a block condition)
  // * prepare cacheline level write enable signal, RegNext() data and mask

  // sbuffer_in_s2:
  // * use cacheline level buffer to update sbuffer data and mask
  // * remove dcache write block (if there is one)
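
  // Example flow (illustrative): a store accepted from the store queue in
  // cycle T (s0) updates ptag/vtag/state in cycle T+1 (s1), and its bytes
  // become visible in the data/mask arrays from cycle T+2 (s2) onwards.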

  val activeMask = VecInit(stateVec.map(s => s.isActive()))
  val validMask  = VecInit(stateVec.map(s => s.isValid()))
  val drainIdx = PriorityEncoder(activeMask)

  val inflightMask = VecInit(stateVec.map(s => s.isInflight()))

  val inptags = io.in.map(in => getPTag(in.bits.addr))
  val invtags = io.in.map(in => getVTag(in.bits.vaddr))
  val sameTag = inptags(0) === inptags(1)
  val firstWord = getVWord(io.in(0).bits.addr)
  val secondWord = getVWord(io.in(1).bits.addr)
  // merge condition
  val mergeMask = Wire(Vec(EnsbufferWidth, Vec(StoreBufferSize, Bool())))
  val mergeIdx = mergeMask.map(PriorityEncoder(_)) // avoid using mergeIdx for better timing
  val canMerge = mergeMask.map(ParallelOR(_))
  val mergeVec = mergeMask.map(_.asUInt)

  for(i <- 0 until EnsbufferWidth){
    mergeMask(i) := widthMap(j =>
      inptags(i) === ptag(j) && activeMask(j)
    )
    assert(!(PopCount(mergeMask(i).asUInt) > 1.U && io.in(i).fire))
  }

  // insert condition
  // firstInsert: the first invalid entry
  // if the first entry can merge, or the second entry has the same ptag as the first,
  // secondInsert equals the first invalid entry; otherwise it is the second invalid entry
  val invalidMask = VecInit(stateVec.map(s => s.isInvalid()))
  val evenInvalidMask = GetEvenBits(invalidMask.asUInt)
  val oddInvalidMask = GetOddBits(invalidMask.asUInt)

  def getFirstOneOH(input: UInt): UInt = {
    assert(input.getWidth > 1)
    val output = WireInit(VecInit(input.asBools))
    (1 until input.getWidth).map(i => {
      output(i) := !input(i - 1, 0).orR && input(i)
    })
    output.asUInt
  }
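
  // Example: getFirstOneOH("b0110".U) yields "b0010".U -- bit i survives only
  // when it is set and no lower bit is set, i.e. a one-hot of the lowest 1.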

  val evenRawInsertVec = getFirstOneOH(evenInvalidMask)
  val oddRawInsertVec = getFirstOneOH(oddInvalidMask)
  val (evenRawInsertIdx, evenCanInsert) = PriorityEncoderWithFlag(evenInvalidMask)
  val (oddRawInsertIdx, oddCanInsert) = PriorityEncoderWithFlag(oddInvalidMask)
  val evenInsertIdx = Cat(evenRawInsertIdx, 0.U(1.W)) // slow to generate, for debug only
  val oddInsertIdx = Cat(oddRawInsertIdx, 1.U(1.W)) // slow to generate, for debug only
  val evenInsertVec = GetEvenBits.reverse(evenRawInsertVec)
  val oddInsertVec = GetOddBits.reverse(oddRawInsertVec)

  val enbufferSelReg = RegInit(false.B)
  when(io.in(0).valid) {
    enbufferSelReg := ~enbufferSelReg
  }

  val firstInsertIdx = Mux(enbufferSelReg, evenInsertIdx, oddInsertIdx) // slow to generate, for debug only
  val secondInsertIdx = Mux(sameTag,
    firstInsertIdx,
    Mux(~enbufferSelReg, evenInsertIdx, oddInsertIdx)
  ) // slow to generate, for debug only
  val firstInsertVec = Mux(enbufferSelReg, evenInsertVec, oddInsertVec)
  val secondInsertVec = Mux(sameTag,
    firstInsertVec,
    Mux(~enbufferSelReg, evenInsertVec, oddInsertVec)
  ) // slow to generate, for debug only
  val firstCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(enbufferSelReg, evenCanInsert, oddCanInsert)
  val secondCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(sameTag,
    firstCanInsert,
    Mux(~enbufferSelReg, evenCanInsert, oddCanInsert)
  ) && (EnsbufferWidth >= 1).B
  val forward_need_uarch_drain = WireInit(false.B)
  val merge_need_uarch_drain = WireInit(false.B)
  val do_uarch_drain = RegNext(forward_need_uarch_drain) || RegNext(RegNext(merge_need_uarch_drain))
  XSPerfAccumulate("do_uarch_drain", do_uarch_drain)

  io.in(0).ready := firstCanInsert
  io.in(1).ready := secondCanInsert && io.in(0).ready
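
  // Allocation sketch (illustrative): free entries are viewed as an even bank
  // and an odd bank; enbufferSelReg alternates which bank enq port 0 allocates
  // from, and port 1 uses the opposite bank (unless sameTag lets it share
  // port 0's line), so the two ports never race for the same free entry.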

  for (i <- 0 until EnsbufferWidth) {
    // train
    if (EnableStorePrefetchSPB) {
      prefetcher.io.sbuffer_enq(i).valid := io.in(i).fire
      prefetcher.io.sbuffer_enq(i).bits := DontCare
      prefetcher.io.sbuffer_enq(i).bits.vaddr := io.in(i).bits.vaddr
    } else {
      prefetcher.io.sbuffer_enq(i).valid := false.B
      prefetcher.io.sbuffer_enq(i).bits := DontCare
    }

    // prefetch req
    if (EnableStorePrefetchAtCommit) {
      if (EnableAtCommitMissTrigger) {
        io.store_prefetch(i).valid := prefetcher.io.prefetch_req(i).valid || (io.in(i).fire && io.in(i).bits.prefetch)
      } else {
        io.store_prefetch(i).valid := prefetcher.io.prefetch_req(i).valid || io.in(i).fire
      }
      io.store_prefetch(i).bits.paddr := DontCare
      io.store_prefetch(i).bits.vaddr := Mux(prefetcher.io.prefetch_req(i).valid, prefetcher.io.prefetch_req(i).bits.vaddr, io.in(i).bits.vaddr)
      prefetcher.io.prefetch_req(i).ready := io.store_prefetch(i).ready
    } else {
      io.store_prefetch(i) <> prefetcher.io.prefetch_req(i)
    }
    io.store_prefetch zip prefetcher.io.prefetch_req drop 2 foreach (x => x._1 <> x._2)
  }
  prefetcher.io.memSetPattenDetected := io.memSetPattenDetected

  def wordReqToBufLine( // allocate a new line in sbuffer
    req: DCacheWordReq,
    reqptag: UInt,
    reqvtag: UInt,
    insertIdx: UInt,
    insertVec: UInt,
    wordOffset: UInt
  ): Unit = {
    assert(UIntToOH(insertIdx) === insertVec)
    val sameBlockInflightMask = genSameBlockInflightMask(reqptag)
    (0 until StoreBufferSize).map(entryIdx => {
      when(insertVec(entryIdx)){
        stateVec(entryIdx).state_valid := true.B
        stateVec(entryIdx).w_sameblock_inflight := sameBlockInflightMask.orR // set w_sameblock_inflight when a line is first allocated
        when(sameBlockInflightMask.orR){
          waitInflightMask(entryIdx) := sameBlockInflightMask
        }
        cohCount(entryIdx) := 0.U
        // missqReplayCount(insertIdx) := 0.U
        ptag(entryIdx) := reqptag
        vtag(entryIdx) := reqvtag // update vtag if a new sbuffer line is allocated
      }
    })
  }

  def mergeWordReq( // merge write req into an existing line
    req: DCacheWordReq,
    reqptag: UInt,
    reqvtag: UInt,
    mergeIdx: UInt,
    mergeVec: UInt,
    wordOffset: UInt
  ): Unit = {
    assert(UIntToOH(mergeIdx) === mergeVec)
    (0 until StoreBufferSize).map(entryIdx => {
      when(mergeVec(entryIdx)) {
        cohCount(entryIdx) := 0.U
        // missqReplayCount(entryIdx) := 0.U
        // check if vtag is the same; if not, trigger sbuffer flush
        when(reqvtag =/= vtag(entryIdx)) {
          XSDebug("reqvtag =/= sbufvtag req(vtag %x ptag %x) sbuffer(vtag %x ptag %x)\n",
            reqvtag << OffsetWidth,
            reqptag << OffsetWidth,
            vtag(entryIdx) << OffsetWidth,
            ptag(entryIdx) << OffsetWidth
          )
          merge_need_uarch_drain := true.B
        }
      }
    })
  }
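
  // Enq decision sketch (illustrative): for each enq port, a store merges into
  // an active line whose ptag matches (resetting that line's eviction timer);
  // otherwise a fresh line is allocated. A vtag mismatch on merge means the
  // same paddr is now reached through a different vaddr, so a uarch drain is
  // requested to keep vaddr-indexed load forwarding correct.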

  for(((in, vwordOffset), i) <- io.in.zip(Seq(firstWord, secondWord)).zipWithIndex){
    writeReq(i).valid := in.fire
    writeReq(i).bits.vwordOffset := vwordOffset
    writeReq(i).bits.mask := in.bits.mask
    writeReq(i).bits.data := in.bits.data
    writeReq(i).bits.wline := in.bits.wline
    val debug_insertIdx = if(i == 0) firstInsertIdx else secondInsertIdx
    val insertVec = if(i == 0) firstInsertVec else secondInsertVec
    assert(!((PopCount(insertVec) > 1.U) && in.fire))
    val insertIdx = OHToUInt(insertVec)
    accessIdx(i).valid := RegNext(in.fire)
    accessIdx(i).bits := RegNext(Mux(canMerge(i), mergeIdx(i), insertIdx))
    when(in.fire){
      when(canMerge(i)){
        writeReq(i).bits.wvec := mergeVec(i)
        mergeWordReq(in.bits, inptags(i), invtags(i), mergeIdx(i), mergeVec(i), vwordOffset)
        XSDebug(p"merge req $i to line [${mergeIdx(i)}]\n")
      }.otherwise({
        writeReq(i).bits.wvec := insertVec
        wordReqToBufLine(in.bits, inptags(i), invtags(i), insertIdx, insertVec, vwordOffset)
        XSDebug(p"insert req $i to line[$insertIdx]\n")
        assert(debug_insertIdx === insertIdx)
      })
    }
  }


  for(i <- 0 until StoreBufferSize){
    XSDebug(stateVec(i).isValid(),
      p"[$i] timeout:${cohCount(i)(EvictCountBits-1)} state:${stateVec(i)}\n"
    )
  }

  for((req, i) <- io.in.zipWithIndex){
    XSDebug(req.fire,
      p"accept req [$i]: " +
        p"addr:${Hexadecimal(req.bits.addr)} " +
        p"mask:${Binary(shiftMaskToLow(req.bits.addr,req.bits.mask))} " +
        p"data:${Hexadecimal(shiftDataToLow(req.bits.addr,req.bits.data))}\n"
    )
    XSDebug(req.valid && !req.ready,
      p"req [$i] blocked by sbuffer\n"
    )
  }

  // for now, when enq, trigger a prefetch (if EnableAtCommitMissTrigger)
  require(EnsbufferWidth <= StorePipelineWidth)

  // ---------------------- Send Dcache Req ---------------------

  val sbuffer_empty = Cat(invalidMask).andR
  val sq_empty = !Cat(io.in.map(_.valid)).orR
  val empty = sbuffer_empty && sq_empty
  val threshold = Wire(UInt(5.W)) // RegNext(io.csrCtrl.sbuffer_threshold +& 1.U)
  threshold := Constantin.createRecord(s"StoreBufferThreshold_${p(XSCoreParamsKey).HartId}", initValue = 7)
  val base = Wire(UInt(5.W))
  base := Constantin.createRecord(s"StoreBufferBase_${p(XSCoreParamsKey).HartId}", initValue = 4)
  val ActiveCount = PopCount(activeMask)
  val ValidCount = PopCount(validMask)
  val forceThreshold = Mux(io.force_write, threshold - base, threshold)
  val do_eviction = RegNext(ActiveCount >= forceThreshold || ActiveCount === (StoreBufferSize-1).U || ValidCount === (StoreBufferSize).U, init = false.B)
  require((StoreBufferThreshold + 1) <= StoreBufferSize)

  XSDebug(p"ActiveCount[$ActiveCount]\n")
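
  // Eviction trigger sketch (illustrative, assuming the default Constantin
  // values above): eviction normally starts once 7 lines are active; when
  // force_write is asserted (store queue under pressure) the trigger drops to
  // 7 - 4 = 3 active lines, so the sbuffer starts draining earlier.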

  io.flush.empty := RegNext(empty && io.sqempty)
  // lru.io.flush := sbuffer_state === x_drain_all && empty
  switch(sbuffer_state){
    is(x_idle){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(do_uarch_drain){
        sbuffer_state := x_drain_sbuffer
      }.elsewhen(do_eviction){
        sbuffer_state := x_replace
      }
    }
    is(x_drain_all){
      when(empty){
        sbuffer_state := x_idle
      }
    }
    is(x_drain_sbuffer){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(sbuffer_empty){
        sbuffer_state := x_idle
      }
    }
    is(x_replace){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(do_uarch_drain){
        sbuffer_state := x_drain_sbuffer
      }.elsewhen(!do_eviction){
        sbuffer_state := x_idle
      }
    }
  }
  XSDebug(p"sbuffer state:${sbuffer_state} do eviction:${do_eviction} empty:${empty}\n")

  def noSameBlockInflight(idx: UInt): Bool = {
    // stateVec(idx) itself must not be s_inflight
    !Cat(widthMap(i => inflightMask(i) && ptag(idx) === ptag(i))).orR
  }

  def genSameBlockInflightMask(ptag_in: UInt): UInt = {
    val mask = VecInit(widthMap(i => inflightMask(i) && ptag_in === ptag(i))).asUInt // quite slow, use it with care
    assert(!(PopCount(mask) > 1.U))
    mask
  }

  def haveSameBlockInflight(ptag_in: UInt): Bool = {
    genSameBlockInflightMask(ptag_in).orR
  }

  // ---------------------------------------------------------------------------
  // sbuffer to dcache pipeline
  // ---------------------------------------------------------------------------

  // Now sbuffer deq logic is divided into 2 pipeline stages, plus an extra resp stage:

  // sbuffer_out_s0:
  // * read data and meta from sbuffer
  // * RegNext() them
  // * set line state to inflight

  // sbuffer_out_s1:
  // * send write req to dcache

  // sbuffer_out_extra:
  // * receive write result from dcache
  // * update line state
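
  // Deq timing sketch (illustrative): a candidate line selected in cycle T
  // (s0) is marked inflight and its index/tags are registered; in cycle T+1
  // (s1) the full-line write req is presented to dcache; the entry is freed
  // (or re-armed for replay) only when the dcache resp comes back.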

  val sbuffer_out_s1_ready = Wire(Bool())

  // ---------------------------------------------------------------------------
  // sbuffer_out_s0
  // ---------------------------------------------------------------------------

  val need_drain = needDrain(sbuffer_state)
  val need_replace = do_eviction || (sbuffer_state === x_replace)
  val sbuffer_out_s0_evictionIdx = Mux(missqReplayHasTimeOut,
    missqReplayTimeOutIdx,
    Mux(need_drain,
      drainIdx,
      Mux(cohHasTimeOut, cohTimeOutIdx, replaceIdx)
    )
  )
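  // Eviction source priority (as encoded by the Mux chain above):
  // missq replay timeout > drain (flush) > cohCount timeout > PLRU replacement.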

  // If there is an inflight dcache req with the same ptag as
  // sbuffer_out_s0_evictionIdx's ptag, the current eviction should be blocked.
  val sbuffer_out_s0_valid = missqReplayHasTimeOut ||
    stateVec(sbuffer_out_s0_evictionIdx).isDcacheReqCandidate() &&
    (need_drain || cohHasTimeOut || need_replace)
  assert(!(
    stateVec(sbuffer_out_s0_evictionIdx).isDcacheReqCandidate &&
    !noSameBlockInflight(sbuffer_out_s0_evictionIdx)
  ))
  val sbuffer_out_s0_cango = sbuffer_out_s1_ready
  sbuffer_out_s0_fire := sbuffer_out_s0_valid && sbuffer_out_s0_cango

  // ---------------------------------------------------------------------------
  // sbuffer_out_s1
  // ---------------------------------------------------------------------------

  // TODO: use EnsbufferWidth
  val shouldWaitWriteFinish = RegNext(VecInit((0 until EnsbufferWidth).map{i =>
    (writeReq(i).bits.wvec.asUInt & UIntToOH(sbuffer_out_s0_evictionIdx).asUInt).orR &&
    writeReq(i).valid
  }).asUInt.orR)
  // block dcache write if read / write hazard
  val blockDcacheWrite = shouldWaitWriteFinish

  val sbuffer_out_s1_valid = RegInit(false.B)
  sbuffer_out_s1_ready := io.dcache.req.ready && !blockDcacheWrite || !sbuffer_out_s1_valid
  val sbuffer_out_s1_fire = io.dcache.req.fire

  // when sbuffer_out_s1_fire, send the dcache req stored in the pipeline reg to dcache
  when(sbuffer_out_s1_fire){
    sbuffer_out_s1_valid := false.B
  }
  // when sbuffer_out_s0_fire, read dcache req data and store it in a pipeline reg
  when(sbuffer_out_s0_cango){
    sbuffer_out_s1_valid := sbuffer_out_s0_valid
  }
  when(sbuffer_out_s0_fire){
    stateVec(sbuffer_out_s0_evictionIdx).state_inflight := true.B
    stateVec(sbuffer_out_s0_evictionIdx).w_timeout := false.B
    // stateVec(sbuffer_out_s0_evictionIdx).s_pipe_req := true.B
    XSDebug(p"$sbuffer_out_s0_evictionIdx will be sent to Dcache\n")
  }

  XSDebug(p"need drain:$need_drain cohHasTimeOut: $cohHasTimeOut need replace:$need_replace\n")
  XSDebug(p"drainIdx:$drainIdx tIdx:$cohTimeOutIdx replIdx:$replaceIdx " +
    p"blocked:${!noSameBlockInflight(sbuffer_out_s0_evictionIdx)} v:${activeMask(sbuffer_out_s0_evictionIdx)}\n")
  XSDebug(p"sbuffer_out_s0_valid:$sbuffer_out_s0_valid evictIdx:$sbuffer_out_s0_evictionIdx dcache ready:${io.dcache.req.ready}\n")
  // Note: if other dcache reqs in the same block are inflight,
  // the lru update may not be accurate
  accessIdx(EnsbufferWidth).valid := invalidMask(replaceIdx) || (
    need_replace && !need_drain && !cohHasTimeOut && !missqReplayHasTimeOut && sbuffer_out_s0_cango && activeMask(replaceIdx))
  accessIdx(EnsbufferWidth).bits := replaceIdx
  val sbuffer_out_s1_evictionIdx = RegEnable(sbuffer_out_s0_evictionIdx, sbuffer_out_s0_fire)
  val sbuffer_out_s1_evictionPTag = RegEnable(ptag(sbuffer_out_s0_evictionIdx), sbuffer_out_s0_fire)
  val sbuffer_out_s1_evictionVTag = RegEnable(vtag(sbuffer_out_s0_evictionIdx), sbuffer_out_s0_fire)

  io.dcache.req.valid := sbuffer_out_s1_valid && !blockDcacheWrite
  io.dcache.req.bits := DontCare
  io.dcache.req.bits.cmd   := MemoryOpConstants.M_XWR
  io.dcache.req.bits.addr  := getAddr(sbuffer_out_s1_evictionPTag)
  io.dcache.req.bits.vaddr := getAddr(sbuffer_out_s1_evictionVTag)
  io.dcache.req.bits.data  := data(sbuffer_out_s1_evictionIdx).asUInt
  io.dcache.req.bits.mask  := mask(sbuffer_out_s1_evictionIdx).asUInt
  io.dcache.req.bits.id := sbuffer_out_s1_evictionIdx

  when (sbuffer_out_s1_fire) {
    assert(!(io.dcache.req.bits.vaddr === 0.U))
    assert(!(io.dcache.req.bits.addr === 0.U))
  }

  XSDebug(sbuffer_out_s1_fire,
    p"send buf [$sbuffer_out_s1_evictionIdx] to Dcache, req fire\n"
  )

  // update sbuffer status according to dcache resp source

  def id_to_sbuffer_id(id: UInt): UInt = {
    require(id.getWidth >= log2Up(StoreBufferSize))
    id(log2Up(StoreBufferSize)-1, 0)
  }

  // hit resp
  io.dcache.hit_resps.map(resp => {
    val dcache_resp_id = resp.bits.id
    when (resp.fire) {
      stateVec(dcache_resp_id).state_inflight := false.B
      stateVec(dcache_resp_id).state_valid := false.B
      assert(!resp.bits.replay)
      assert(!resp.bits.miss) // no need to resp if miss, to be optimized
      assert(stateVec(dcache_resp_id).state_inflight === true.B)
    }

    // Updating the w_sameblock_inflight flag is delayed by 1 cycle
    //
    // When a new req allocates a new line in sbuffer, the sameblock_inflight check will ignore
    // the current dcache.hit_resps. Then, in the next cycle, we have plenty of time to check
    // if the same block is still inflight
    (0 until StoreBufferSize).map(i => {
      when(
        stateVec(i).w_sameblock_inflight &&
        stateVec(i).state_valid &&
        RegNext(resp.fire) &&
        waitInflightMask(i) === UIntToOH(RegNext(id_to_sbuffer_id(dcache_resp_id)))
      ){
        stateVec(i).w_sameblock_inflight := false.B
      }
    })
  })

  io.dcache.hit_resps.zip(dataModule.io.maskFlushReq).map{case (resp, maskFlush) => {
    maskFlush.valid := resp.fire
    maskFlush.bits.wvec := UIntToOH(resp.bits.id)
  }}
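
  // w_sameblock_inflight lifecycle (illustrative): set at allocation when an
  // inflight line with the same ptag exists (waitInflightMask records which
  // one), and cleared one cycle after that line's hit resp returns. Until
  // then the new line is excluded from isDcacheReqCandidate, so two writes to
  // the same block are never inflight at the same time.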

  // replay resp
  val replay_resp_id = io.dcache.replay_resp.bits.id
  when (io.dcache.replay_resp.fire) {
    missqReplayCount(replay_resp_id) := 0.U
    stateVec(replay_resp_id).w_timeout := true.B
    // waiting for timeout
    assert(io.dcache.replay_resp.bits.replay)
    assert(stateVec(replay_resp_id).state_inflight === true.B)
  }

  // TODO: reuse cohCount
  (0 until StoreBufferSize).map(i => {
    when(stateVec(i).w_timeout && stateVec(i).state_inflight && !missqReplayCount(i)(MissqReplayCountBits-1)) {
      missqReplayCount(i) := missqReplayCount(i) + 1.U
    }
    when(activeMask(i) && !cohTimeOutMask(i)){
      cohCount(i) := cohCount(i)+1.U
    }
  })

  if (env.EnableDifftest) {
    // hit resp
    io.dcache.hit_resps.zipWithIndex.map{case (resp, index) => {
      val difftest = DifftestModule(new DiffSbufferEvent, delay = 1)
      val dcache_resp_id = resp.bits.id
      difftest.coreid := io.hartId
      difftest.index  := index.U
      difftest.valid  := resp.fire
      difftest.addr   := getAddr(ptag(dcache_resp_id))
      difftest.data   := data(dcache_resp_id).asTypeOf(Vec(CacheLineBytes, UInt(8.W)))
      difftest.mask   := mask(dcache_resp_id).asUInt
    }}
  }

  // ---------------------- Load Data Forward ---------------------
  val mismatch = Wire(Vec(LoadPipelineWidth, Bool()))
  XSPerfAccumulate("vaddr_match_failed", mismatch(0) || mismatch(1))
  for ((forward, i) <- io.forward.zipWithIndex) {
    val vtag_matches = VecInit(widthMap(w => vtag(w) === getVTag(forward.vaddr)))
    // ptag_matches uses paddr from dtlb, which is far from sbuffer
    val ptag_matches = VecInit(widthMap(w => RegEnable(ptag(w), forward.valid) === RegEnable(getPTag(forward.paddr), forward.valid)))
    val tag_matches = vtag_matches
    val tag_mismatch = RegNext(forward.valid) && VecInit(widthMap(w =>
      RegNext(vtag_matches(w)) =/= ptag_matches(w) && RegNext((activeMask(w) || inflightMask(w)))
    )).asUInt.orR
    mismatch(i) := tag_mismatch
    when (tag_mismatch) {
      XSDebug("forward tag mismatch: pmatch %x vmatch %x vaddr %x paddr %x\n",
        RegNext(ptag_matches.asUInt),
        RegNext(vtag_matches.asUInt),
        RegNext(forward.vaddr),
        RegNext(forward.paddr)
      )
      forward_need_uarch_drain := true.B
    }
    val valid_tag_matches = widthMap(w => tag_matches(w) && activeMask(w))
    val inflight_tag_matches = widthMap(w => tag_matches(w) && inflightMask(w))
    val line_offset_mask = UIntToOH(getVWordOffset(forward.paddr))

    val valid_tag_match_reg = valid_tag_matches.map(RegNext(_))
    val inflight_tag_match_reg = inflight_tag_matches.map(RegNext(_))
    val line_offset_reg = RegNext(line_offset_mask)
    val forward_mask_candidate_reg = RegEnable(
      VecInit(mask.map(entry => entry(getVWordOffset(forward.paddr)))),
      forward.valid
    )
    val forward_data_candidate_reg = RegEnable(
      VecInit(data.map(entry => entry(getVWordOffset(forward.paddr)))),
      forward.valid
    )

    val selectedValidMask = Mux1H(valid_tag_match_reg, forward_mask_candidate_reg)
    val selectedValidData = Mux1H(valid_tag_match_reg, forward_data_candidate_reg)
    selectedValidMask.suggestName("selectedValidMask_"+i)
    selectedValidData.suggestName("selectedValidData_"+i)

    val selectedInflightMask = Mux1H(inflight_tag_match_reg, forward_mask_candidate_reg)
    val selectedInflightData = Mux1H(inflight_tag_match_reg, forward_data_candidate_reg)
    selectedInflightMask.suggestName("selectedInflightMask_"+i)
    selectedInflightData.suggestName("selectedInflightData_"+i)

    // currently not being used
    val selectedInflightMaskFast = Mux1H(line_offset_mask, Mux1H(inflight_tag_matches, mask).asTypeOf(Vec(CacheLineVWords, Vec(VDataBytes, Bool()))))
    val selectedValidMaskFast = Mux1H(line_offset_mask, Mux1H(valid_tag_matches, mask).asTypeOf(Vec(CacheLineVWords, Vec(VDataBytes, Bool()))))

    forward.dataInvalid := false.B // data in store line merge buffer is always ready
    forward.matchInvalid := tag_mismatch // paddr / vaddr cam result does not match
    for (j <- 0 until VDataBytes) {
      forward.forwardMask(j) := false.B
      forward.forwardData(j) := DontCare

      // valid entries have higher priority than inflight entries
      when(selectedInflightMask(j)) {
        forward.forwardMask(j) := true.B
        forward.forwardData(j) := selectedInflightData(j)
      }
      when(selectedValidMask(j)) {
        forward.forwardMask(j) := true.B
        forward.forwardData(j) := selectedValidData(j)
      }

      forward.forwardMaskFast(j) := selectedInflightMaskFast(j) || selectedValidMaskFast(j)
    }
    forward.addrInvalid := DontCare
  }
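
  // Forwarding sketch (illustrative): loads are CAM-matched by vtag in the
  // first cycle (paddr from the dtlb arrives too late to compare directly);
  // one cycle later the registered vtag match is cross-checked against a
  // registered ptag match, and any disagreement raises matchInvalid and
  // drains the sbuffer. Valid (active) entries win over inflight ones since
  // the active copy of a line is the more recently written one.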

  for (i <- 0 until StoreBufferSize) {
    XSDebug("sbf entry " + i + " : ptag %x vtag %x valid %x active %x inflight %x w_timeout %x\n",
      ptag(i) << OffsetWidth,
      vtag(i) << OffsetWidth,
      stateVec(i).isValid(),
      activeMask(i),
      inflightMask(i),
      stateVec(i).w_timeout
    )
  }

  if (env.EnableDifftest) {
    for (i <- 0 until EnsbufferWidth) {
      val storeCommit = io.in(i).fire
      val waddr = ZeroExt(Cat(io.in(i).bits.addr(PAddrBits - 1, 3), 0.U(3.W)), 64)
      val sbufferMask = shiftMaskToLow(io.in(i).bits.addr, io.in(i).bits.mask)
      val sbufferData = shiftDataToLow(io.in(i).bits.addr, io.in(i).bits.data)
      val wmask = sbufferMask
      val wdata = sbufferData & MaskExpand(sbufferMask)

      val difftest = DifftestModule(new DiffStoreEvent, delay = 2)
      difftest.coreid := io.hartId
      difftest.index  := i.U
      difftest.valid  := storeCommit
      difftest.addr   := waddr
      difftest.data   := wdata
      difftest.mask   := wmask
    }
  }

  val perf_valid_entry_count = RegNext(PopCount(VecInit(stateVec.map(s => !s.isInvalid())).asUInt))
  XSPerfHistogram("util", perf_valid_entry_count, true.B, 0, StoreBufferSize, 1)
  XSPerfAccumulate("sbuffer_req_valid", PopCount(VecInit(io.in.map(_.valid)).asUInt))
  XSPerfAccumulate("sbuffer_req_fire", PopCount(VecInit(io.in.map(_.fire)).asUInt))
  XSPerfAccumulate("sbuffer_merge", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && canMerge(i)})).asUInt))
  XSPerfAccumulate("sbuffer_newline", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && !canMerge(i)})).asUInt))
  XSPerfAccumulate("dcache_req_valid", io.dcache.req.valid)
  XSPerfAccumulate("dcache_req_fire", io.dcache.req.fire)
  XSPerfAccumulate("sbuffer_idle", sbuffer_state === x_idle)
  XSPerfAccumulate("sbuffer_flush", sbuffer_state === x_drain_sbuffer)
  XSPerfAccumulate("sbuffer_replace", sbuffer_state === x_replace)
  XSPerfAccumulate("evenCanInsert", evenCanInsert)
  XSPerfAccumulate("oddCanInsert", oddCanInsert)
  XSPerfAccumulate("mainpipe_resp_valid", io.dcache.main_pipe_hit_resp.fire)
  //XSPerfAccumulate("refill_resp_valid", io.dcache.refill_hit_resp.fire)
  XSPerfAccumulate("replay_resp_valid", io.dcache.replay_resp.fire)
  XSPerfAccumulate("coh_timeout", cohHasTimeOut)

  // val (store_latency_sample, store_latency) = TransactionLatencyCounter(io.lsu.req.fire, io.lsu.resp.fire)
  // XSPerfHistogram("store_latency", store_latency, store_latency_sample, 0, 100, 10)
  // XSPerfAccumulate("store_req", io.lsu.req.fire)

  val perfEvents = Seq(
    ("sbuffer_req_valid ", PopCount(VecInit(io.in.map(_.valid)).asUInt)),
    ("sbuffer_req_fire  ", PopCount(VecInit(io.in.map(_.fire)).asUInt)),
    ("sbuffer_merge     ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && canMerge(i)})).asUInt)),
    ("sbuffer_newline   ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && !canMerge(i)})).asUInt)),
    ("dcache_req_valid  ", io.dcache.req.valid),
    ("dcache_req_fire   ", io.dcache.req.fire),
    ("sbuffer_idle      ", sbuffer_state === x_idle),
    ("sbuffer_flush     ", sbuffer_state === x_drain_sbuffer),
    ("sbuffer_replace   ", sbuffer_state === x_replace),
    ("mpipe_resp_valid  ", io.dcache.main_pipe_hit_resp.fire),
    //("refill_resp_valid ", io.dcache.refill_hit_resp.fire),
    ("replay_resp_valid ", io.dcache.replay_resp.fire),
    ("coh_timeout       ", cohHasTimeOut),
    ("sbuffer_1_4_valid ", (perf_valid_entry_count < (StoreBufferSize.U/4.U))),
    ("sbuffer_2_4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/4.U)) & (perf_valid_entry_count <= (StoreBufferSize.U/2.U))),
    ("sbuffer_3_4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/2.U)) & (perf_valid_entry_count <= (StoreBufferSize.U*3.U/4.U))),
    ("sbuffer_full_valid", (perf_valid_entry_count > (StoreBufferSize.U*3.U/4.U)))
  )
  generatePerfEvent()

}