xref: /XiangShan/src/main/scala/xiangshan/mem/sbuffer/Sbuffer.scala (revision 0466583513e4c1ddbbb566b866b8963635acb20f)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import utility._
import xiangshan.cache._
import difftest._
import freechips.rocketchip.util._

class SbufferFlushBundle extends Bundle {
  val valid = Output(Bool())
  val empty = Input(Bool())
}

trait HasSbufferConst extends HasXSParameter {
  val EvictCycles = 1 << 20
  val SbufferReplayDelayCycles = 16
  require(isPow2(EvictCycles))
  val EvictCountBits = log2Up(EvictCycles+1)
  val MissqReplayCountBits = log2Up(SbufferReplayDelayCycles) + 1

  // dcache write hit resp has 2 sources
  // refill pipe resp and main pipe resp
  val NumDcacheWriteResp = 2 // hardcoded

  val SbufferIndexWidth: Int = log2Up(StoreBufferSize)
  // paddr = ptag + offset
  val CacheLineBytes: Int = CacheLineSize / 8
  val CacheLineWords: Int = CacheLineBytes / DataBytes
  val OffsetWidth: Int = log2Up(CacheLineBytes)
  val WordsWidth: Int = log2Up(CacheLineWords)
  val PTagWidth: Int = PAddrBits - OffsetWidth
  val VTagWidth: Int = VAddrBits - OffsetWidth
  val WordOffsetWidth: Int = PAddrBits - WordsWidth

  val CacheLineVWords: Int = CacheLineBytes / VDataBytes
  val VWordsWidth: Int = log2Up(CacheLineVWords)
  val VWordWidth: Int = log2Up(VDataBytes)
  val VWordOffsetWidth: Int = PAddrBits - VWordWidth
}
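
// Worked example of the derived constants above (illustrative only; the real
// values come from the XSParameter config). Assuming PAddrBits = 36,
// VAddrBits = 39, CacheLineSize = 512 and VDataBytes = 16 (VLEN = 128):
//   CacheLineBytes  = 512 / 8        = 64
//   OffsetWidth     = log2Up(64)     = 6
//   PTagWidth       = 36 - 6         = 30
//   VTagWidth       = 39 - 6         = 33
//   CacheLineVWords = 64 / 16        = 4
//   VWordsWidth     = log2Up(4)      = 2
//   EvictCountBits  = log2Up(2^20+1) = 21  (bit 20 is the timeout flag)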

class SbufferEntryState (implicit p: Parameters) extends SbufferBundle {
  val state_valid    = Bool() // this entry is active
  val state_inflight = Bool() // sbuffer is trying to write this entry to dcache
  val w_timeout = Bool() // got a replay resp from dcache; wait for the resend timer to expire before resending the store req
  val w_sameblock_inflight = Bool() // a dcache req to the same cache block is still inflight

  def isInvalid(): Bool = !state_valid
  def isValid(): Bool = state_valid
  def isActive(): Bool = state_valid && !state_inflight
  def isInflight(): Bool = state_inflight
  def isDcacheReqCandidate(): Bool = state_valid && !state_inflight && !w_sameblock_inflight
}
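
// How the state bits combine (derived from the predicates above):
//   state_valid  state_inflight | meaning
//   -----------  -------------- | ----------------------------------------------
//        0             x        | invalid (free entry)
//        1             0        | active: holds data, may merge or be evicted
//        1             1        | inflight: write req sent to dcache, awaiting resp
// w_timeout and w_sameblock_inflight further qualify when an entry may be
// (re)sent to dcache; see isDcacheReqCandidate().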

class SbufferBundle(implicit p: Parameters) extends XSBundle with HasSbufferConst

class DataWriteReq(implicit p: Parameters) extends SbufferBundle {
  // universal write mask
  val wvec = UInt(StoreBufferSize.W)
  // 2 cycle update
  val mask = UInt((VLEN/8).W)
  val data = UInt(VLEN.W)
  val vwordOffset = UInt(VWordOffsetWidth.W)
  val wline = Bool() // write full cacheline
}

class MaskFlushReq(implicit p: Parameters) extends SbufferBundle {
  // universal write mask
  val wvec = UInt(StoreBufferSize.W)
}

class SbufferData(implicit p: Parameters) extends XSModule with HasSbufferConst {
  val io = IO(new Bundle(){
    // update data and mask when alloc or merge
    val writeReq = Vec(EnsbufferWidth, Flipped(ValidIO(new DataWriteReq)))
    // clean mask when deq
    val maskFlushReq = Vec(NumDcacheWriteResp, Flipped(ValidIO(new MaskFlushReq)))
    val dataOut = Output(Vec(StoreBufferSize, Vec(CacheLineVWords, Vec(VDataBytes, UInt(8.W)))))
    val maskOut = Output(Vec(StoreBufferSize, Vec(CacheLineVWords, Vec(VDataBytes, Bool()))))
  })

  val data = Reg(Vec(StoreBufferSize, Vec(CacheLineVWords, Vec(VDataBytes, UInt(8.W)))))
  // val mask = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool()))))
  val mask = RegInit(
    VecInit(Seq.fill(StoreBufferSize)(
      VecInit(Seq.fill(CacheLineVWords)(
        VecInit(Seq.fill(VDataBytes)(false.B))
      ))
    ))
  )

  // 2 cycle line mask clean
  for(line <- 0 until StoreBufferSize){
    val line_mask_clean_flag = RegNext(
      io.maskFlushReq.map(a => a.valid && a.bits.wvec(line)).reduce(_ || _)
    )
    line_mask_clean_flag.suggestName("line_mask_clean_flag_"+line)
    when(line_mask_clean_flag){
      for(word <- 0 until CacheLineVWords){
        for(byte <- 0 until VDataBytes){
          mask(line)(word)(byte) := false.B
        }
      }
    }
  }
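
  // Note: the flush request is registered once (RegNext above), so a line's
  // mask is actually cleared one cycle after the corresponding dcache resp
  // fires. Presumably this eases timing on the resp path; it also matches the
  // 2 cycle data / mask update below, which is likewise registered.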

  // 2 cycle data / mask update
  for(i <- 0 until EnsbufferWidth) {
    val req = io.writeReq(i)
    for(line <- 0 until StoreBufferSize){
      val sbuffer_in_s1_line_wen = req.valid && req.bits.wvec(line)
      val sbuffer_in_s2_line_wen = RegNext(sbuffer_in_s1_line_wen)
      val line_write_buffer_data = RegEnable(req.bits.data, sbuffer_in_s1_line_wen)
      val line_write_buffer_wline = RegEnable(req.bits.wline, sbuffer_in_s1_line_wen)
      val line_write_buffer_mask = RegEnable(req.bits.mask, sbuffer_in_s1_line_wen)
      val line_write_buffer_offset = RegEnable(req.bits.vwordOffset(VWordsWidth-1, 0), sbuffer_in_s1_line_wen)
      sbuffer_in_s1_line_wen.suggestName("sbuffer_in_s1_line_wen_"+line)
      sbuffer_in_s2_line_wen.suggestName("sbuffer_in_s2_line_wen_"+line)
      line_write_buffer_data.suggestName("line_write_buffer_data_"+line)
      line_write_buffer_wline.suggestName("line_write_buffer_wline_"+line)
      line_write_buffer_mask.suggestName("line_write_buffer_mask_"+line)
      line_write_buffer_offset.suggestName("line_write_buffer_offset_"+line)
      for(word <- 0 until CacheLineVWords){
        for(byte <- 0 until VDataBytes){
          val write_byte = sbuffer_in_s2_line_wen && (
            line_write_buffer_mask(byte) && (line_write_buffer_offset === word.U) ||
            line_write_buffer_wline
          )
          when(write_byte){
            data(line)(word)(byte) := line_write_buffer_data(byte*8+7, byte*8)
            mask(line)(word)(byte) := true.B
          }
        }
      }
    }
  }

  // 1 cycle line mask clean
  // for(i <- 0 until EnsbufferWidth) {
  //   val req = io.writeReq(i)
  //   when(req.valid){
  //     for(line <- 0 until StoreBufferSize){
  //       when(
  //         req.bits.wvec(line) &&
  //         req.bits.cleanMask
  //       ){
  //         for(word <- 0 until CacheLineWords){
  //           for(byte <- 0 until DataBytes){
  //             mask(line)(word)(byte) := false.B
  //             val debug_last_cycle_write_byte = RegNext(req.valid && req.bits.wvec(line) && (
  //               req.bits.mask(byte) && (req.bits.wordOffset(WordsWidth-1, 0) === word.U) ||
  //               req.bits.wline
  //             ))
  //             assert(!debug_last_cycle_write_byte)
  //           }
  //         }
  //       }
  //     }
  //   }
  // }

  io.dataOut := data
  io.maskOut := mask
}

class Sbuffer(implicit p: Parameters) extends DCacheModule with HasSbufferConst with HasPerfEvents {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(8.W))
    val in = Vec(EnsbufferWidth, Flipped(Decoupled(new DCacheWordReqWithVaddr)))  // TODO: store logic only supports EnsbufferWidth == 2 for now
    val dcache = Flipped(new DCacheToSbufferIO)
    val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val sqempty = Input(Bool())
    val flush = Flipped(new SbufferFlushBundle)
    val csrCtrl = Flipped(new CustomCSRCtrlIO)
    val force_write = Input(Bool())
  })

  val dataModule = Module(new SbufferData)
  dataModule.io.writeReq <> DontCare
  val writeReq = dataModule.io.writeReq

  val ptag = Reg(Vec(StoreBufferSize, UInt(PTagWidth.W)))
  val vtag = Reg(Vec(StoreBufferSize, UInt(VTagWidth.W)))
  val debug_mask = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool()))))
  val waitInflightMask = Reg(Vec(StoreBufferSize, UInt(StoreBufferSize.W)))
  val data = dataModule.io.dataOut
  val mask = dataModule.io.maskOut
  val stateVec = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U.asTypeOf(new SbufferEntryState))))
  val cohCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(EvictCountBits.W))))
  val missqReplayCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(MissqReplayCountBits.W))))

  val sbuffer_out_s0_fire = Wire(Bool())

  /*
       idle --[flush]      --> drain_all     --[buf empty] --> idle
            --[uarch drain]--> drain_sbuffer --[sbuf empty]--> idle
            --[buf full]   --> replace       --[not full]  --> idle
  */
  // x_drain_all: drain store queue and sbuffer
  // x_drain_sbuffer: drain sbuffer only, block store queue to sbuffer write
  val x_idle :: x_replace :: x_drain_all :: x_drain_sbuffer :: Nil = Enum(4)
  def needDrain(state: UInt): Bool =
    state(1)
  val sbuffer_state = RegInit(x_idle)

  // ---------------------- Store Enq Sbuffer ---------------------

  def getPTag(pa: UInt): UInt =
    pa(PAddrBits - 1, PAddrBits - PTagWidth)

  def getVTag(va: UInt): UInt =
    va(VAddrBits - 1, VAddrBits - VTagWidth)

  def getWord(pa: UInt): UInt =
    pa(PAddrBits-1, 3)

  def getVWord(pa: UInt): UInt =
    pa(PAddrBits-1, 4)

  def getWordOffset(pa: UInt): UInt =
    pa(OffsetWidth-1, 3)

  def getVWordOffset(pa: UInt): UInt =
    pa(OffsetWidth-1, 4)

  def getAddr(ptag: UInt): UInt =
    Cat(ptag, 0.U((PAddrBits - PTagWidth).W))

  def getByteOffset(offset: UInt): UInt =
    Cat(offset(OffsetWidth - 1, 3), 0.U(3.W))

  def isOneOf(key: UInt, seq: Seq[UInt]): Bool =
    if(seq.isEmpty) false.B else Cat(seq.map(_===key)).orR()

  def widthMap[T <: Data](f: Int => T) = (0 until StoreBufferSize) map f

  // sbuffer entry count

  val plru = new ValidPseudoLRU(StoreBufferSize)
  val accessIdx = Wire(Vec(EnsbufferWidth + 1, Valid(UInt(SbufferIndexWidth.W))))

  val candidateVec = VecInit(stateVec.map(s => s.isDcacheReqCandidate()))

  val replaceAlgoIdx = plru.way(candidateVec.reverse)._2
  val replaceAlgoNotDcacheCandidate = !stateVec(replaceAlgoIdx).isDcacheReqCandidate()

  assert(!(candidateVec.asUInt().orR && replaceAlgoNotDcacheCandidate), "we have a way to select, but the replace algo selected an invalid way")

  val replaceIdx = replaceAlgoIdx
  plru.access(accessIdx)

  //-------------------------cohCount-----------------------------
  // insert and merge: cohCount=0
  // every cycle cohCount+=1
  // if cohCount(EvictCountBits-1)==1, evict
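  // Worked example (illustrative): with EvictCycles = 1 << 20, an active line
  // that receives no new store for ~1M cycles saturates bit EvictCountBits-1
  // (bit 20) of its cohCount and is force-evicted to dcache, bounding how long
  // a store can stay merged-but-unwritten in the sbuffer.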
  val cohTimeOutMask = VecInit(widthMap(i => cohCount(i)(EvictCountBits - 1) && stateVec(i).isActive()))
  val (cohTimeOutIdx, cohHasTimeOut) = PriorityEncoderWithFlag(cohTimeOutMask)
  val cohTimeOutOH = PriorityEncoderOH(cohTimeOutMask)
  val missqReplayTimeOutMask = VecInit(widthMap(i => missqReplayCount(i)(MissqReplayCountBits - 1) && stateVec(i).w_timeout))
  val (missqReplayTimeOutIdxGen, missqReplayHasTimeOutGen) = PriorityEncoderWithFlag(missqReplayTimeOutMask)
  val missqReplayHasTimeOut = RegNext(missqReplayHasTimeOutGen) && !RegNext(sbuffer_out_s0_fire)
  val missqReplayTimeOutIdx = RegEnable(missqReplayTimeOutIdxGen, missqReplayHasTimeOutGen)

  //-------------------------sbuffer enqueue-----------------------------

  // Now sbuffer enq logic is divided into 3 stages:

  // sbuffer_in_s0:
  // * read data and meta from store queue
  // * store them in a 2 entry fifo queue

  // sbuffer_in_s1:
  // * read data and meta from the fifo queue
  // * update sbuffer meta (vtag, ptag, flag)
  // * prevent that line from being sent to dcache (add a block condition)
  // * prepare cacheline level write enable signal, RegNext() data and mask

  // sbuffer_in_s2:
  // * use the cacheline level buffer to update sbuffer data and mask
  // * remove the dcache write block (if there is one)
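
  // Because the data/mask array write lands one cycle after io.in fires
  // (in s2), a line picked for eviction in the same window could be read
  // before its bytes are written. The deq pipeline below therefore checks
  // shouldWaitWriteFinish / blockDcacheWrite before sending a line to dcache.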

  val activeMask = VecInit(stateVec.map(s => s.isActive()))
  val validMask  = VecInit(stateVec.map(s => s.isValid()))
  val drainIdx = PriorityEncoder(activeMask)

  val inflightMask = VecInit(stateVec.map(s => s.isInflight()))

  val inptags = io.in.map(in => getPTag(in.bits.addr))
  val invtags = io.in.map(in => getVTag(in.bits.vaddr))
  val sameTag = inptags(0) === inptags(1)
  val firstWord = getVWord(io.in(0).bits.addr)
  val secondWord = getVWord(io.in(1).bits.addr)
  // merge condition
  val mergeMask = Wire(Vec(EnsbufferWidth, Vec(StoreBufferSize, Bool())))
  val mergeIdx = mergeMask.map(PriorityEncoder(_)) // avoid using mergeIdx for better timing
  val canMerge = mergeMask.map(ParallelOR(_))
  val mergeVec = mergeMask.map(_.asUInt)

  for(i <- 0 until EnsbufferWidth){
    mergeMask(i) := widthMap(j =>
      inptags(i) === ptag(j) && activeMask(j)
    )
    assert(!(PopCount(mergeMask(i).asUInt) > 1.U && io.in(i).fire()))
  }

  // insert condition
  // firstInsert: the first invalid entry
  // if the first entry can merge, or the second entry has the same ptag as the first entry,
  // secondInsert equals the first invalid entry; otherwise, it is the second invalid entry
  val invalidMask = VecInit(stateVec.map(s => s.isInvalid()))
  val evenInvalidMask = GetEvenBits(invalidMask.asUInt)
  val oddInvalidMask = GetOddBits(invalidMask.asUInt)

  def getFirstOneOH(input: UInt): UInt = {
    assert(input.getWidth > 1)
    val output = WireInit(VecInit(input.asBools))
    (1 until input.getWidth).map(i => {
      output(i) := !input(i - 1, 0).orR && input(i)
    })
    output.asUInt
  }
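
  // Example: getFirstOneOH("b0110".U) = "b0010".U, a one-hot of the least
  // significant set bit (bit 0 passes through unchanged via the WireInit).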

  val evenRawInsertVec = getFirstOneOH(evenInvalidMask)
  val oddRawInsertVec = getFirstOneOH(oddInvalidMask)
  val (evenRawInsertIdx, evenCanInsert) = PriorityEncoderWithFlag(evenInvalidMask)
  val (oddRawInsertIdx, oddCanInsert) = PriorityEncoderWithFlag(oddInvalidMask)
  val evenInsertIdx = Cat(evenRawInsertIdx, 0.U(1.W)) // slow to generate, for debug only
  val oddInsertIdx = Cat(oddRawInsertIdx, 1.U(1.W)) // slow to generate, for debug only
  val evenInsertVec = GetEvenBits.reverse(evenRawInsertVec)
  val oddInsertVec = GetOddBits.reverse(oddRawInsertVec)
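
  // Worked example (hypothetical, StoreBufferSize = 8): if invalidMask =
  // b0110_0110, then evenInvalidMask = bits(6,4,2,0) = b1010 and
  // evenRawInsertVec = b0010; GetEvenBits.reverse scatters that back to the
  // even positions, giving evenInsertVec = b0000_0100, i.e. entry 2. The
  // even/odd split lets two stores allocate independent entries in parallel.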

  val enbufferSelReg = RegInit(false.B)
  when(io.in(0).valid) {
    enbufferSelReg := ~enbufferSelReg
  }

  val firstInsertIdx = Mux(enbufferSelReg, evenInsertIdx, oddInsertIdx) // slow to generate, for debug only
  val secondInsertIdx = Mux(sameTag,
    firstInsertIdx,
    Mux(~enbufferSelReg, evenInsertIdx, oddInsertIdx)
  ) // slow to generate, for debug only
  val firstInsertVec = Mux(enbufferSelReg, evenInsertVec, oddInsertVec)
  val secondInsertVec = Mux(sameTag,
    firstInsertVec,
    Mux(~enbufferSelReg, evenInsertVec, oddInsertVec)
  ) // slow to generate, for debug only
  val firstCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(enbufferSelReg, evenCanInsert, oddCanInsert)
  val secondCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(sameTag,
    firstCanInsert,
    Mux(~enbufferSelReg, evenCanInsert, oddCanInsert)
  ) && (EnsbufferWidth >= 1).B
  val forward_need_uarch_drain = WireInit(false.B)
  val merge_need_uarch_drain = WireInit(false.B)
  val do_uarch_drain = RegNext(forward_need_uarch_drain) || RegNext(RegNext(merge_need_uarch_drain))
  XSPerfAccumulate("do_uarch_drain", do_uarch_drain)

  io.in(0).ready := firstCanInsert
  io.in(1).ready := secondCanInsert && io.in(0).ready

  def wordReqToBufLine( // allocate a new line in sbuffer
    req: DCacheWordReq,
    reqptag: UInt,
    reqvtag: UInt,
    insertIdx: UInt,
    insertVec: UInt,
    wordOffset: UInt
  ): Unit = {
    assert(UIntToOH(insertIdx) === insertVec)
    val sameBlockInflightMask = genSameBlockInflightMask(reqptag)
    (0 until StoreBufferSize).map(entryIdx => {
      when(insertVec(entryIdx)){
        stateVec(entryIdx).state_valid := true.B
        stateVec(entryIdx).w_sameblock_inflight := sameBlockInflightMask.orR // set w_sameblock_inflight when a line is first allocated
        when(sameBlockInflightMask.orR){
          waitInflightMask(entryIdx) := sameBlockInflightMask
        }
        cohCount(entryIdx) := 0.U
        // missqReplayCount(insertIdx) := 0.U
        ptag(entryIdx) := reqptag
        vtag(entryIdx) := reqvtag // update vtag if a new sbuffer line is allocated
      }
    })
  }

  def mergeWordReq( // merge write req into an existing line
    req: DCacheWordReq,
    reqptag: UInt,
    reqvtag: UInt,
    mergeIdx: UInt,
    mergeVec: UInt,
    wordOffset: UInt
  ): Unit = {
    assert(UIntToOH(mergeIdx) === mergeVec)
    (0 until StoreBufferSize).map(entryIdx => {
      when(mergeVec(entryIdx)) {
        cohCount(entryIdx) := 0.U
        // missqReplayCount(entryIdx) := 0.U
        // check if vtag is the same, if not, trigger sbuffer flush
        when(reqvtag =/= vtag(entryIdx)) {
          XSDebug("reqvtag =/= sbufvtag req(vtag %x ptag %x) sbuffer(vtag %x ptag %x)\n",
            reqvtag << OffsetWidth,
            reqptag << OffsetWidth,
            vtag(entryIdx) << OffsetWidth,
            ptag(entryIdx) << OffsetWidth
          )
          merge_need_uarch_drain := true.B
        }
      }
    })
  }

  for(((in, vwordOffset), i) <- io.in.zip(Seq(firstWord, secondWord)).zipWithIndex){
    writeReq(i).valid := in.fire()
    writeReq(i).bits.vwordOffset := vwordOffset
    writeReq(i).bits.mask := in.bits.mask
    writeReq(i).bits.data := in.bits.data
    writeReq(i).bits.wline := in.bits.wline
    val debug_insertIdx = if(i == 0) firstInsertIdx else secondInsertIdx
    val insertVec = if(i == 0) firstInsertVec else secondInsertVec
    assert(!((PopCount(insertVec) > 1.U) && in.fire()))
    val insertIdx = OHToUInt(insertVec)
    accessIdx(i).valid := RegNext(in.fire())
    accessIdx(i).bits := RegNext(Mux(canMerge(i), mergeIdx(i), insertIdx))
    when(in.fire()){
      when(canMerge(i)){
        writeReq(i).bits.wvec := mergeVec(i)
        mergeWordReq(in.bits, inptags(i), invtags(i), mergeIdx(i), mergeVec(i), vwordOffset)
        XSDebug(p"merge req $i to line [${mergeIdx(i)}]\n")
      }.otherwise({
        writeReq(i).bits.wvec := insertVec
        wordReqToBufLine(in.bits, inptags(i), invtags(i), insertIdx, insertVec, vwordOffset)
        XSDebug(p"insert req $i to line[$insertIdx]\n")
        assert(debug_insertIdx === insertIdx)
      })
    }
  }


  for(i <- 0 until StoreBufferSize){
    XSDebug(stateVec(i).isValid(),
      p"[$i] timeout:${cohCount(i)(EvictCountBits-1)} state:${stateVec(i)}\n"
    )
  }

  for((req, i) <- io.in.zipWithIndex){
    XSDebug(req.fire(),
      p"accept req [$i]: " +
        p"addr:${Hexadecimal(req.bits.addr)} " +
        p"mask:${Binary(shiftMaskToLow(req.bits.addr,req.bits.mask))} " +
        p"data:${Hexadecimal(shiftDataToLow(req.bits.addr,req.bits.data))}\n"
    )
    XSDebug(req.valid && !req.ready,
      p"req [$i] blocked by sbuffer\n"
    )
  }

  // ---------------------- Send Dcache Req ---------------------

  val sbuffer_empty = Cat(invalidMask).andR()
  val sq_empty = !Cat(io.in.map(_.valid)).orR()
  val empty = sbuffer_empty && sq_empty
  val threshold = Wire(UInt(5.W)) // RegNext(io.csrCtrl.sbuffer_threshold +& 1.U)
  threshold := Constantin.createRecord("StoreBufferThreshold_"+p(XSCoreParamsKey).HartId.toString(), initValue = 7.U)
  val base = Wire(UInt(5.W))
  base := Constantin.createRecord("StoreBufferBase_"+p(XSCoreParamsKey).HartId.toString(), initValue = 4.U)
  val ActiveCount = PopCount(activeMask)
  val ValidCount = PopCount(validMask)
  val forceThreshold = Mux(io.force_write, threshold - base, threshold)
  val do_eviction = RegNext(ActiveCount >= forceThreshold || ActiveCount === (StoreBufferSize-1).U || ValidCount === (StoreBufferSize).U, init = false.B)
  require((StoreBufferThreshold + 1) <= StoreBufferSize)
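
  // Worked example (using the Constantin initValues above, which may be
  // overridden at runtime): threshold = 7, base = 4. Normally eviction starts
  // once 7 or more lines are active; when io.force_write is asserted the
  // threshold drops to 7 - 4 = 3, draining the sbuffer more aggressively.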

  XSDebug(p"ActiveCount[$ActiveCount]\n")

  io.flush.empty := RegNext(empty && io.sqempty)
  // lru.io.flush := sbuffer_state === x_drain_all && empty
  switch(sbuffer_state){
    is(x_idle){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(do_uarch_drain){
        sbuffer_state := x_drain_sbuffer
      }.elsewhen(do_eviction){
        sbuffer_state := x_replace
      }
    }
    is(x_drain_all){
      when(empty){
        sbuffer_state := x_idle
      }
    }
    is(x_drain_sbuffer){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(sbuffer_empty){
        sbuffer_state := x_idle
      }
    }
    is(x_replace){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(do_uarch_drain){
        sbuffer_state := x_drain_sbuffer
      }.elsewhen(!do_eviction){
        sbuffer_state := x_idle
      }
    }
  }
  XSDebug(p"sbuffer state:${sbuffer_state} do eviction:${do_eviction} empty:${empty}\n")

  def noSameBlockInflight(idx: UInt): Bool = {
    // stateVec(idx) itself must not be s_inflight
    !Cat(widthMap(i => inflightMask(i) && ptag(idx) === ptag(i))).orR()
  }

  def genSameBlockInflightMask(ptag_in: UInt): UInt = {
    val mask = VecInit(widthMap(i => inflightMask(i) && ptag_in === ptag(i))).asUInt // quite slow, use it with care
    assert(!(PopCount(mask) > 1.U))
    mask
  }

  def haveSameBlockInflight(ptag_in: UInt): Bool = {
    genSameBlockInflightMask(ptag_in).orR
  }

  // ---------------------------------------------------------------------------
  // sbuffer to dcache pipeline
  // ---------------------------------------------------------------------------

  // Now sbuffer deq logic is divided into 2 stages (plus an extra resp step):

  // sbuffer_out_s0:
  // * read data and meta from sbuffer
  // * RegNext() them
  // * set line state to inflight

  // sbuffer_out_s1:
  // * send write req to dcache

  // sbuffer_out_extra:
  // * receive write result from dcache
  // * update line state
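
  // Eviction source priority in s0 (see the sbuffer_out_s0_evictionIdx Mux below):
  //   1. missq replay timeout   (resend a previously replayed line)
  //   2. drain                  (flush / uarch drain, in index order)
  //   3. coh timeout            (line has been idle for EvictCycles)
  //   4. PLRU replacement       (capacity eviction)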

  val sbuffer_out_s1_ready = Wire(Bool())

  // ---------------------------------------------------------------------------
  // sbuffer_out_s0
  // ---------------------------------------------------------------------------

  val need_drain = needDrain(sbuffer_state)
  val need_replace = do_eviction || (sbuffer_state === x_replace)
  val sbuffer_out_s0_evictionIdx = Mux(missqReplayHasTimeOut,
    missqReplayTimeOutIdx,
    Mux(need_drain,
      drainIdx,
      Mux(cohHasTimeOut, cohTimeOutIdx, replaceIdx)
    )
  )

  // If there is an inflight dcache req with the same ptag as
  // sbuffer_out_s0_evictionIdx's ptag, the current eviction should be blocked.
  val sbuffer_out_s0_valid = missqReplayHasTimeOut ||
    stateVec(sbuffer_out_s0_evictionIdx).isDcacheReqCandidate() &&
    (need_drain || cohHasTimeOut || need_replace)
  assert(!(
    stateVec(sbuffer_out_s0_evictionIdx).isDcacheReqCandidate &&
    !noSameBlockInflight(sbuffer_out_s0_evictionIdx)
  ))
  val sbuffer_out_s0_cango = sbuffer_out_s1_ready
  sbuffer_out_s0_fire := sbuffer_out_s0_valid && sbuffer_out_s0_cango

  // ---------------------------------------------------------------------------
  // sbuffer_out_s1
  // ---------------------------------------------------------------------------

  // TODO: use EnsbufferWidth
  val shouldWaitWriteFinish = RegNext(VecInit((0 until EnsbufferWidth).map{i =>
    (writeReq(i).bits.wvec.asUInt & UIntToOH(sbuffer_out_s0_evictionIdx).asUInt).orR &&
    writeReq(i).valid
  }).asUInt.orR)
  // block dcache write if read / write hazard
  val blockDcacheWrite = shouldWaitWriteFinish
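
  // Hazard example: at cycle T a store merges into line L (enq s1) while the
  // deq path also picks L for eviction. The merged bytes only reach the data
  // array at T+1 (enq s2), so sending L to dcache at T+1 would read stale
  // data. shouldWaitWriteFinish detects the wvec overlap and stalls the
  // dcache req for that cycle.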

  val sbuffer_out_s1_valid = RegInit(false.B)
  sbuffer_out_s1_ready := io.dcache.req.ready && !blockDcacheWrite || !sbuffer_out_s1_valid
  val sbuffer_out_s1_fire = io.dcache.req.fire()

  // when sbuffer_out_s1_fire, send the dcache req stored in the pipeline reg to dcache
  when(sbuffer_out_s1_fire){
    sbuffer_out_s1_valid := false.B
  }
  // when sbuffer_out_s0_fire, read dcache req data and store it in a pipeline reg
  when(sbuffer_out_s0_cango){
    sbuffer_out_s1_valid := sbuffer_out_s0_valid
  }
  when(sbuffer_out_s0_fire){
    stateVec(sbuffer_out_s0_evictionIdx).state_inflight := true.B
    stateVec(sbuffer_out_s0_evictionIdx).w_timeout := false.B
    // stateVec(sbuffer_out_s0_evictionIdx).s_pipe_req := true.B
    XSDebug(p"$sbuffer_out_s0_evictionIdx will be sent to Dcache\n")
  }

  XSDebug(p"need drain:$need_drain cohHasTimeOut: $cohHasTimeOut need replace:$need_replace\n")
  XSDebug(p"drainIdx:$drainIdx tIdx:$cohTimeOutIdx replIdx:$replaceIdx " +
    p"blocked:${!noSameBlockInflight(sbuffer_out_s0_evictionIdx)} v:${activeMask(sbuffer_out_s0_evictionIdx)}\n")
  XSDebug(p"sbuffer_out_s0_valid:$sbuffer_out_s0_valid evictIdx:$sbuffer_out_s0_evictionIdx dcache ready:${io.dcache.req.ready}\n")
  // Note: if other dcache reqs to the same block are inflight,
  // the lru update may not be accurate
  accessIdx(EnsbufferWidth).valid := invalidMask(replaceIdx) || (
    need_replace && !need_drain && !cohHasTimeOut && !missqReplayHasTimeOut && sbuffer_out_s0_cango && activeMask(replaceIdx))
  accessIdx(EnsbufferWidth).bits := replaceIdx
  val sbuffer_out_s1_evictionIdx = RegEnable(sbuffer_out_s0_evictionIdx, enable = sbuffer_out_s0_fire)
  val sbuffer_out_s1_evictionPTag = RegEnable(ptag(sbuffer_out_s0_evictionIdx), enable = sbuffer_out_s0_fire)
  val sbuffer_out_s1_evictionVTag = RegEnable(vtag(sbuffer_out_s0_evictionIdx), enable = sbuffer_out_s0_fire)

  io.dcache.req.valid := sbuffer_out_s1_valid && !blockDcacheWrite
  io.dcache.req.bits := DontCare
  io.dcache.req.bits.cmd   := MemoryOpConstants.M_XWR
  io.dcache.req.bits.addr  := getAddr(sbuffer_out_s1_evictionPTag)
  io.dcache.req.bits.vaddr := getAddr(sbuffer_out_s1_evictionVTag)
  io.dcache.req.bits.data  := data(sbuffer_out_s1_evictionIdx).asUInt
  io.dcache.req.bits.mask  := mask(sbuffer_out_s1_evictionIdx).asUInt
  io.dcache.req.bits.id := sbuffer_out_s1_evictionIdx

  when (sbuffer_out_s1_fire) {
    assert(!(io.dcache.req.bits.vaddr === 0.U))
    assert(!(io.dcache.req.bits.addr === 0.U))
  }

  XSDebug(sbuffer_out_s1_fire,
    p"send buf [$sbuffer_out_s1_evictionIdx] to Dcache, req fire\n"
  )

  // update sbuffer status according to dcache resp source

  def id_to_sbuffer_id(id: UInt): UInt = {
    require(id.getWidth >= log2Up(StoreBufferSize))
    id(log2Up(StoreBufferSize)-1, 0)
  }

  // hit resp
  io.dcache.hit_resps.map(resp => {
    val dcache_resp_id = resp.bits.id
    when (resp.fire()) {
      stateVec(dcache_resp_id).state_inflight := false.B
      stateVec(dcache_resp_id).state_valid := false.B
      assert(!resp.bits.replay)
      assert(!resp.bits.miss) // no need to resp on a miss; to be optimized
      assert(stateVec(dcache_resp_id).state_inflight === true.B)
    }

    // Updating the w_sameblock_inflight flag is delayed by 1 cycle
    //
    // When a new req allocates a new line in sbuffer, the sameblock_inflight check ignores
    // the current dcache.hit_resps. Then, in the next cycle, we have plenty of time to check
    // if the same block is still inflight
    (0 until StoreBufferSize).map(i => {
      when(
        stateVec(i).w_sameblock_inflight &&
        stateVec(i).state_valid &&
        RegNext(resp.fire()) &&
        waitInflightMask(i) === UIntToOH(RegNext(id_to_sbuffer_id(dcache_resp_id)))
      ){
        stateVec(i).w_sameblock_inflight := false.B
      }
    })
  })
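
  // Cycle-by-cycle sketch of the delayed clear above: at cycle T a hit resp
  // for line A fires while a new store allocates line B with the same ptag,
  // so B records A in waitInflightMask and sets w_sameblock_inflight. At T+1
  // the registered resp id is compared against waitInflightMask(B) and the
  // flag is cleared, making B a dcache req candidate again.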

  io.dcache.hit_resps.zip(dataModule.io.maskFlushReq).map{case (resp, maskFlush) => {
    maskFlush.valid := resp.fire()
    maskFlush.bits.wvec := UIntToOH(resp.bits.id)
  }}

  // replay resp
  val replay_resp_id = io.dcache.replay_resp.bits.id
  when (io.dcache.replay_resp.fire()) {
    missqReplayCount(replay_resp_id) := 0.U
    stateVec(replay_resp_id).w_timeout := true.B
    // waiting for timeout
    assert(io.dcache.replay_resp.bits.replay)
    assert(stateVec(replay_resp_id).state_inflight === true.B)
  }

  // TODO: reuse cohCount
  (0 until StoreBufferSize).map(i => {
    when(stateVec(i).w_timeout && stateVec(i).state_inflight && !missqReplayCount(i)(MissqReplayCountBits-1)) {
      missqReplayCount(i) := missqReplayCount(i) + 1.U
    }
    when(activeMask(i) && !cohTimeOutMask(i)){
      cohCount(i) := cohCount(i)+1.U
    }
  })

  if (env.EnableDifftest) {
    // hit resp
    io.dcache.hit_resps.zipWithIndex.map{case (resp, index) => {
      val difftest = Module(new DifftestSbufferEvent)
      val dcache_resp_id = resp.bits.id
      difftest.io.clock := clock
      difftest.io.coreid := io.hartId
      difftest.io.index := index.U
      difftest.io.sbufferResp := RegNext(resp.fire())
      difftest.io.sbufferAddr := RegNext(getAddr(ptag(dcache_resp_id)))
      difftest.io.sbufferData := RegNext(data(dcache_resp_id).asTypeOf(Vec(CacheLineBytes, UInt(8.W))))
      difftest.io.sbufferMask := RegNext(mask(dcache_resp_id).asUInt)
    }}
  }

  // ---------------------- Load Data Forward ---------------------
  val mismatch = Wire(Vec(LoadPipelineWidth, Bool()))
  XSPerfAccumulate("vaddr_match_failed", mismatch(0) || mismatch(1))
  for ((forward, i) <- io.forward.zipWithIndex) {
    val vtag_matches = VecInit(widthMap(w => vtag(w) === getVTag(forward.vaddr)))
    // ptag_matches uses paddr from dtlb, which is far from sbuffer
    val ptag_matches = VecInit(widthMap(w => RegEnable(ptag(w), forward.valid) === RegEnable(getPTag(forward.paddr), forward.valid)))
    val tag_matches = vtag_matches
    val tag_mismatch = RegNext(forward.valid) && VecInit(widthMap(w =>
      RegNext(vtag_matches(w)) =/= ptag_matches(w) && RegNext((activeMask(w) || inflightMask(w)))
    )).asUInt.orR
    mismatch(i) := tag_mismatch
    when (tag_mismatch) {
      XSDebug("forward tag mismatch: pmatch %x vmatch %x vaddr %x paddr %x\n",
        RegNext(ptag_matches.asUInt),
        RegNext(vtag_matches.asUInt),
        RegNext(forward.vaddr),
        RegNext(forward.paddr)
      )
      forward_need_uarch_drain := true.B
    }
    val valid_tag_matches = widthMap(w => tag_matches(w) && activeMask(w))
    val inflight_tag_matches = widthMap(w => tag_matches(w) && inflightMask(w))
    val line_offset_mask = UIntToOH(getVWordOffset(forward.paddr))

    val valid_tag_match_reg = valid_tag_matches.map(RegNext(_))
    val inflight_tag_match_reg = inflight_tag_matches.map(RegNext(_))
    val line_offset_reg = RegNext(line_offset_mask)
    val forward_mask_candidate_reg = RegEnable(
      VecInit(mask.map(entry => entry(getVWordOffset(forward.paddr)))),
      forward.valid
    )
    val forward_data_candidate_reg = RegEnable(
      VecInit(data.map(entry => entry(getVWordOffset(forward.paddr)))),
      forward.valid
    )

    val selectedValidMask = Mux1H(valid_tag_match_reg, forward_mask_candidate_reg)
    val selectedValidData = Mux1H(valid_tag_match_reg, forward_data_candidate_reg)
    selectedValidMask.suggestName("selectedValidMask_"+i)
    selectedValidData.suggestName("selectedValidData_"+i)

    val selectedInflightMask = Mux1H(inflight_tag_match_reg, forward_mask_candidate_reg)
    val selectedInflightData = Mux1H(inflight_tag_match_reg, forward_data_candidate_reg)
    selectedInflightMask.suggestName("selectedInflightMask_"+i)
    selectedInflightData.suggestName("selectedInflightData_"+i)

    // fast (same-cycle) versions of the masks above; only used to drive forwardMaskFast below
    val selectedInflightMaskFast = Mux1H(line_offset_mask, Mux1H(inflight_tag_matches, mask).asTypeOf(Vec(CacheLineVWords, Vec(VDataBytes, Bool()))))
    val selectedValidMaskFast = Mux1H(line_offset_mask, Mux1H(valid_tag_matches, mask).asTypeOf(Vec(CacheLineVWords, Vec(VDataBytes, Bool()))))
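
    // Forwarding is effectively split over two cycles: tag CAM plus mask mux
    // in the request cycle (the *Fast signals feed forwardMaskFast), while
    // data is selected from registered candidates one cycle later (forwardMask
    // / forwardData). The vtag CAM decides the match; the registered ptag
    // compare only cross-checks it and triggers a drain on a mismatch.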

    forward.dataInvalid := false.B // data in store line merge buffer is always ready
    forward.matchInvalid := tag_mismatch // paddr / vaddr cam result does not match
    for (j <- 0 until VDataBytes) {
      forward.forwardMask(j) := false.B
      forward.forwardData(j) := DontCare

      // valid entries have higher priority than inflight entries
      when(selectedInflightMask(j)) {
        forward.forwardMask(j) := true.B
        forward.forwardData(j) := selectedInflightData(j)
      }
      when(selectedValidMask(j)) {
        forward.forwardMask(j) := true.B
        forward.forwardData(j) := selectedValidData(j)
      }

      forward.forwardMaskFast(j) := selectedInflightMaskFast(j) || selectedValidMaskFast(j)
    }
    forward.addrInvalid := DontCare
  }

  for (i <- 0 until StoreBufferSize) {
    XSDebug("sbf entry " + i + " : ptag %x vtag %x valid %x active %x inflight %x w_timeout %x\n",
      ptag(i) << OffsetWidth,
      vtag(i) << OffsetWidth,
      stateVec(i).isValid(),
      activeMask(i),
      inflightMask(i),
      stateVec(i).w_timeout
    )
  }

  val perf_valid_entry_count = RegNext(PopCount(VecInit(stateVec.map(s => !s.isInvalid())).asUInt))
  XSPerfHistogram("util", perf_valid_entry_count, true.B, 0, StoreBufferSize, 1)
  XSPerfAccumulate("sbuffer_req_valid", PopCount(VecInit(io.in.map(_.valid)).asUInt))
  XSPerfAccumulate("sbuffer_req_fire", PopCount(VecInit(io.in.map(_.fire())).asUInt))
  XSPerfAccumulate("sbuffer_merge", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && canMerge(i)})).asUInt))
  XSPerfAccumulate("sbuffer_newline", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && !canMerge(i)})).asUInt))
  XSPerfAccumulate("dcache_req_valid", io.dcache.req.valid)
  XSPerfAccumulate("dcache_req_fire", io.dcache.req.fire())
  XSPerfAccumulate("sbuffer_idle", sbuffer_state === x_idle)
  XSPerfAccumulate("sbuffer_flush", sbuffer_state === x_drain_sbuffer)
  XSPerfAccumulate("sbuffer_replace", sbuffer_state === x_replace)
  XSPerfAccumulate("evenCanInsert", evenCanInsert)
  XSPerfAccumulate("oddCanInsert", oddCanInsert)
  XSPerfAccumulate("mainpipe_resp_valid", io.dcache.main_pipe_hit_resp.fire())
  XSPerfAccumulate("refill_resp_valid", io.dcache.refill_hit_resp.fire())
  XSPerfAccumulate("replay_resp_valid", io.dcache.replay_resp.fire())
  XSPerfAccumulate("coh_timeout", cohHasTimeOut)

  // val (store_latency_sample, store_latency) = TransactionLatencyCounter(io.lsu.req.fire(), io.lsu.resp.fire())
  // XSPerfHistogram("store_latency", store_latency, store_latency_sample, 0, 100, 10)
  // XSPerfAccumulate("store_req", io.lsu.req.fire())

  val perfEvents = Seq(
    ("sbuffer_req_valid ", PopCount(VecInit(io.in.map(_.valid)).asUInt)                                                                ),
    ("sbuffer_req_fire  ", PopCount(VecInit(io.in.map(_.fire())).asUInt)                                                               ),
    ("sbuffer_merge     ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && canMerge(i)})).asUInt)                ),
    ("sbuffer_newline   ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && !canMerge(i)})).asUInt)               ),
    ("dcache_req_valid  ", io.dcache.req.valid                                                                                         ),
    ("dcache_req_fire   ", io.dcache.req.fire()                                                                                        ),
    ("sbuffer_idle      ", sbuffer_state === x_idle                                                                                    ),
    ("sbuffer_flush     ", sbuffer_state === x_drain_sbuffer                                                                           ),
    ("sbuffer_replace   ", sbuffer_state === x_replace                                                                                 ),
    ("mpipe_resp_valid  ", io.dcache.main_pipe_hit_resp.fire()                                                                         ),
    ("refill_resp_valid ", io.dcache.refill_hit_resp.fire()                                                                            ),
    ("replay_resp_valid ", io.dcache.replay_resp.fire()                                                                                ),
    ("coh_timeout       ", cohHasTimeOut                                                                                               ),
    ("sbuffer_1_4_valid ", (perf_valid_entry_count < (StoreBufferSize.U/4.U))                                                          ),
    ("sbuffer_2_4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/4.U)) & (perf_valid_entry_count <= (StoreBufferSize.U/2.U))    ),
    ("sbuffer_3_4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/2.U)) & (perf_valid_entry_count <= (StoreBufferSize.U*3.U/4.U))),
    ("sbuffer_full_valid", (perf_valid_entry_count > (StoreBufferSize.U*3.U/4.U)))
  )
  generatePerfEvent()

}
853