/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import xiangshan.cache._
import difftest._
import freechips.rocketchip.util._

class SbufferFlushBundle extends Bundle {
  val valid = Output(Bool())
  val empty = Input(Bool())
}

trait HasSbufferConst extends HasXSParameter {
  val EvictCycles = 1 << 20
  val SbufferReplayDelayCycles = 16
  require(isPow2(EvictCycles))
  val EvictCountBits = log2Up(EvictCycles+1)
  val MissqReplayCountBits = log2Up(SbufferReplayDelayCycles) + 1

  val SbufferIndexWidth: Int = log2Up(StoreBufferSize)
  // paddr = ptag + offset
  val CacheLineBytes: Int = CacheLineSize / 8
  val CacheLineWords: Int = CacheLineBytes / DataBytes
  val OffsetWidth: Int = log2Up(CacheLineBytes)
  val WordsWidth: Int = log2Up(CacheLineWords)
  val PTagWidth: Int = PAddrBits - OffsetWidth
  val VTagWidth: Int = VAddrBits - OffsetWidth
  val WordOffsetWidth: Int = PAddrBits - WordsWidth
}
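
// For reference, with one plausible configuration from HasXSParameter (CacheLineSize =
// 512 bits, DataBytes = 8, PAddrBits = 36), the derived constants above work out to
// CacheLineBytes = 64, CacheLineWords = 8, OffsetWidth = 6, WordsWidth = 3 and
// PTagWidth = 30. The exact values depend on the surrounding configuration; the
// numbers here are only a worked example.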

class SbufferEntryState (implicit p: Parameters) extends SbufferBundle {
  val state_valid    = Bool() // this entry is active
  val state_inflight = Bool() // sbuffer is trying to write this entry to dcache
  val w_timeout = Bool() // with timeout resp, waiting for resend store pipeline req timeout
  val w_sameblock_inflight = Bool() // same cache block dcache req is inflight

  def isInvalid(): Bool = !state_valid
  def isValid(): Bool = state_valid
  def isActive(): Bool = state_valid && !state_inflight
  def isInflight(): Bool = state_inflight
  def isDcacheReqCandidate(): Bool = state_valid && !state_inflight && !w_sameblock_inflight
}
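
// Entry lifecycle, as encoded by the flags above: an entry is allocated as
// valid + !inflight ("active": mergeable and a dcache req candidate), becomes
// valid + inflight once selected for eviction, and is freed by a dcache hit resp.
// A replay resp instead sets w_timeout, and the entry is resent to dcache after
// roughly SbufferReplayDelayCycles cycles.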

class SbufferBundle(implicit p: Parameters) extends XSBundle with HasSbufferConst

class DataWriteReq(implicit p: Parameters) extends SbufferBundle {
  // universal write mask
  val wvec = UInt(StoreBufferSize.W)
  // 2 cycle update
  val mask = UInt((DataBits/8).W)
  val data = UInt(DataBits.W)
  val wordOffset = UInt(WordOffsetWidth.W)
  val wline = Bool() // write whole cacheline
  // 1 cycle update
  val cleanMask = Bool() // set whole line's mask to 0
}
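
// Note on the two update latencies above: cleanMask takes effect 1 cycle after the
// write request fires, while data / mask land 2 cycles after it (via the per-line
// pipeline registers in SbufferData). A newly allocated line therefore has its byte
// mask cleared one cycle before its first store data arrives; the debug assertion in
// the mask-clean loop below checks that a clean never races with a same-cycle byte write.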

class SbufferData(implicit p: Parameters) extends XSModule with HasSbufferConst {
  val io = IO(new Bundle(){
    val writeReq = Vec(EnsbufferWidth, Flipped(ValidIO(new DataWriteReq)))
    val dataOut = Output(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, UInt(8.W)))))
    val maskOut = Output(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool()))))
  })

  val data = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, UInt(8.W)))))
  val mask = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool()))))

  // 2 cycle data / mask update
  for(i <- 0 until EnsbufferWidth) {
    val req = io.writeReq(i)
    for(line <- 0 until StoreBufferSize){
      val sbuffer_in_s1_line_wen = req.valid && req.bits.wvec(line)
      val sbuffer_in_s2_line_wen = RegNext(sbuffer_in_s1_line_wen)
      val line_write_buffer_data = RegEnable(req.bits.data, sbuffer_in_s1_line_wen)
      val line_write_buffer_wline = RegEnable(req.bits.wline, sbuffer_in_s1_line_wen)
      val line_write_buffer_mask = RegEnable(req.bits.mask, sbuffer_in_s1_line_wen)
      val line_write_buffer_offset = RegEnable(req.bits.wordOffset(WordsWidth-1, 0), sbuffer_in_s1_line_wen)
      sbuffer_in_s1_line_wen.suggestName("sbuffer_in_s1_line_wen_"+line)
      sbuffer_in_s2_line_wen.suggestName("sbuffer_in_s2_line_wen_"+line)
      line_write_buffer_data.suggestName("line_write_buffer_data_"+line)
      line_write_buffer_wline.suggestName("line_write_buffer_wline_"+line)
      line_write_buffer_mask.suggestName("line_write_buffer_mask_"+line)
      line_write_buffer_offset.suggestName("line_write_buffer_offset_"+line)
      for(word <- 0 until CacheLineWords){
        for(byte <- 0 until DataBytes){
          val write_byte = sbuffer_in_s2_line_wen && (
            line_write_buffer_mask(byte) && (line_write_buffer_offset === word.U) ||
            line_write_buffer_wline
          )
          when(write_byte){
            data(line)(word)(byte) := line_write_buffer_data(byte*8+7, byte*8)
            mask(line)(word)(byte) := true.B
          }
        }
      }
    }
  }
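
  // Per-byte write condition, illustrated: a masked store to word w updates byte b of
  // word w iff mask(b) is set, while a full-line request (wline) updates every byte of
  // the line regardless of mask and wordOffset.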

  // 1 cycle line mask clean
  for(i <- 0 until EnsbufferWidth) {
    val req = io.writeReq(i)
    when(req.valid){
      for(line <- 0 until StoreBufferSize){
        when(
          req.bits.wvec(line) &&
          req.bits.cleanMask
        ){
          for(word <- 0 until CacheLineWords){
            for(byte <- 0 until DataBytes){
              mask(line)(word)(byte) := false.B
              val debug_last_cycle_write_byte = RegNext(req.valid && req.bits.wvec(line) && (
                req.bits.mask(byte) && (req.bits.wordOffset(WordsWidth-1, 0) === word.U) ||
                req.bits.wline
              ))
              assert(!debug_last_cycle_write_byte)
            }
          }
        }
      }
    }
  }

  io.dataOut := data
  io.maskOut := mask
}

class Sbuffer(implicit p: Parameters) extends DCacheModule with HasSbufferConst with HasPerfEvents {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(8.W))
    val in = Vec(EnsbufferWidth, Flipped(Decoupled(new DCacheWordReqWithVaddr))) // TODO: store logic only supports Width == 2 for now
    val dcache = Flipped(new DCacheToSbufferIO)
    val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val sqempty = Input(Bool())
    val flush = Flipped(new SbufferFlushBundle)
    val csrCtrl = Flipped(new CustomCSRCtrlIO)
  })

  val dataModule = Module(new SbufferData)
  dataModule.io.writeReq <> DontCare
  val writeReq = dataModule.io.writeReq

  val ptag = Reg(Vec(StoreBufferSize, UInt(PTagWidth.W)))
  val vtag = Reg(Vec(StoreBufferSize, UInt(VTagWidth.W)))
  val debug_mask = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool()))))
  val waitInflightMask = Reg(Vec(StoreBufferSize, UInt(StoreBufferSize.W)))
  val data = dataModule.io.dataOut
  val mask = dataModule.io.maskOut
  val stateVec = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U.asTypeOf(new SbufferEntryState))))
  val cohCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(EvictCountBits.W))))
  val missqReplayCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(MissqReplayCountBits.W))))

  val sbuffer_out_s0_fire = Wire(Bool())

  /*
       idle --[flush]   --> drain   --[buf empty]--> idle
            --[buf full]--> replace --[dcache resp]--> idle
  */
  // x_drain_all: drain store queue and sbuffer
  // x_drain_sbuffer: drain sbuffer only, block store queue to sbuffer write
  val x_idle :: x_replace :: x_drain_all :: x_drain_sbuffer :: Nil = Enum(4)
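  // Enum(4) assigns sequential encodings: x_idle = 0, x_replace = 1, x_drain_all = 2,
  // x_drain_sbuffer = 3. Bit 1 is therefore set exactly in the two drain states, which
  // lets needDrain below test a single bit instead of comparing against both states.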
  def needDrain(state: UInt): Bool =
    state(1)
  val sbuffer_state = RegInit(x_idle)

  // ---------------------- Store Enq Sbuffer ---------------------

  def getPTag(pa: UInt): UInt =
    pa(PAddrBits - 1, PAddrBits - PTagWidth)

  def getVTag(va: UInt): UInt =
    va(VAddrBits - 1, VAddrBits - VTagWidth)

  def getWord(pa: UInt): UInt =
    pa(PAddrBits-1, 3)

  def getWordOffset(pa: UInt): UInt =
    pa(OffsetWidth-1, 3)

  def getAddr(ptag: UInt): UInt =
    Cat(ptag, 0.U((PAddrBits - PTagWidth).W))

  def getByteOffset(offset: UInt): UInt =
    Cat(offset(OffsetWidth - 1, 3), 0.U(3.W))
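
  // Worked example for the helpers above, assuming 64-byte lines (OffsetWidth = 6) and
  // PAddrBits = 36: for pa = 0x8000_1238, getPTag(pa) = pa(35, 6), getWordOffset(pa) =
  // pa(5, 3) = 7, and getAddr(getPTag(pa)) = 0x8000_1200 recovers the line base address.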

  def isOneOf(key: UInt, seq: Seq[UInt]): Bool =
    if(seq.isEmpty) false.B else Cat(seq.map(_===key)).orR()

  def widthMap[T <: Data](f: Int => T) = (0 until StoreBufferSize) map f

  // sbuffer entry count

  val plru = new PseudoLRU(StoreBufferSize)
  val accessIdx = Wire(Vec(EnsbufferWidth + 1, Valid(UInt(SbufferIndexWidth.W))))

  val replaceIdx = plru.way
  val replaceIdxOH = UIntToOH(plru.way)
  plru.access(accessIdx)

  //-------------------------cohCount-----------------------------
  // insert and merge: cohCount=0
  // every cycle cohCount+=1
  // if cohCount(EvictCountBits-1)==1, evict
  val cohTimeOutMask = VecInit(widthMap(i => cohCount(i)(EvictCountBits - 1) && stateVec(i).isActive()))
  val (cohTimeOutIdx, cohHasTimeOut) = PriorityEncoderWithFlag(cohTimeOutMask)
  val cohTimeOutOH = PriorityEncoderOH(cohTimeOutMask)
  val missqReplayTimeOutMask = VecInit(widthMap(i => missqReplayCount(i)(MissqReplayCountBits - 1) && stateVec(i).w_timeout))
  val (missqReplayTimeOutIdxGen, missqReplayHasTimeOutGen) = PriorityEncoderWithFlag(missqReplayTimeOutMask)
  val missqReplayHasTimeOut = RegNext(missqReplayHasTimeOutGen) && !RegNext(sbuffer_out_s0_fire)
  val missqReplayTimeOutIdx = RegEnable(missqReplayTimeOutIdxGen, missqReplayHasTimeOutGen)
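
  // Counter-width trick: EvictCountBits = log2Up(EvictCycles + 1) gives cohCount one
  // more bit than is needed to count to EvictCycles, so the MSB doubles as a sticky
  // "timed out" flag. The increment logic further below stops counting once
  // cohTimeOutMask(i) is set, so the counter cannot wrap; missqReplayCount plays the
  // same trick with MissqReplayCountBits.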

  //-------------------------sbuffer enqueue-----------------------------

  // Now sbuffer enq logic is divided into 3 stages:

  // sbuffer_in_s0:
  // * read data and meta from store queue
  // * store them in a 2-entry FIFO queue

  // sbuffer_in_s1:
  // * read data and meta from the FIFO queue
  // * update sbuffer meta (vtag, ptag, flag)
  // * prevent that line from being sent to dcache (add a block condition)
  // * prepare cacheline level write enable signal, RegNext() data and mask

  // sbuffer_in_s2:
  // * use cacheline level buffer to update sbuffer data and mask
  // * remove dcache write block (if any)

  val activeMask = VecInit(stateVec.map(s => s.isActive()))
  val drainIdx = PriorityEncoder(activeMask)

  val inflightMask = VecInit(stateVec.map(s => s.isInflight()))

  val inptags = io.in.map(in => getPTag(in.bits.addr))
  val invtags = io.in.map(in => getVTag(in.bits.vaddr))
  val sameTag = inptags(0) === inptags(1)
  val firstWord = getWord(io.in(0).bits.addr)
  val secondWord = getWord(io.in(1).bits.addr)
  val sameWord = firstWord === secondWord

  // merge condition
  val mergeMask = Wire(Vec(EnsbufferWidth, Vec(StoreBufferSize, Bool())))
  val mergeIdx = mergeMask.map(PriorityEncoder(_)) // avoid using mergeIdx for better timing
  val canMerge = mergeMask.map(ParallelOR(_))
  val mergeVec = mergeMask.map(_.asUInt)

  for(i <- 0 until EnsbufferWidth){
    mergeMask(i) := widthMap(j =>
      inptags(i) === ptag(j) && activeMask(j)
    )
    assert(!(PopCount(mergeMask(i).asUInt) > 1.U && io.in(i).fire()))
  }

  // insert condition
  // firstInsert: the first invalid entry
  // if the first entry can merge, or the second entry has the same ptag as the first,
  // secondInsert equals the first invalid entry; otherwise it is the second invalid entry
  val invalidMask = VecInit(stateVec.map(s => s.isInvalid()))
  val evenInvalidMask = GetEvenBits(invalidMask.asUInt)
  val oddInvalidMask = GetOddBits(invalidMask.asUInt)

  def getFirstOneOH(input: UInt): UInt = {
    assert(input.getWidth > 1)
    val output = WireInit(VecInit(input.asBools))
    (1 until input.getWidth).map(i => {
      output(i) := !input(i - 1, 0).orR && input(i)
    })
    output.asUInt
  }
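
  // getFirstOneOH isolates the lowest set bit as a one-hot vector, e.g. b0110 -> b0010
  // and b1000 -> b1000: bit 0 passes through unchanged, and each higher bit is kept
  // only if every bit below it is zero.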

  val evenRawInsertVec = getFirstOneOH(evenInvalidMask)
  val oddRawInsertVec = getFirstOneOH(oddInvalidMask)
  val (evenRawInsertIdx, evenCanInsert) = PriorityEncoderWithFlag(evenInvalidMask)
  val (oddRawInsertIdx, oddCanInsert) = PriorityEncoderWithFlag(oddInvalidMask)
  val evenInsertIdx = Cat(evenRawInsertIdx, 0.U(1.W)) // slow to generate, for debug only
  val oddInsertIdx = Cat(oddRawInsertIdx, 1.U(1.W)) // slow to generate, for debug only
  val evenInsertVec = GetEvenBits.reverse(evenRawInsertVec)
  val oddInsertVec = GetOddBits.reverse(oddRawInsertVec)

  val enbufferSelReg = RegInit(false.B)
  when(io.in(0).valid) {
    enbufferSelReg := ~enbufferSelReg
  }
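
  // Allocation is interleaved across even and odd entry indices: each enq port gets its
  // own half (selected by enbufferSelReg, which flips whenever port 0 is valid), so two
  // lines can be allocated in the same cycle using two narrow priority encoders instead
  // of one multi-ported encoder over all entries.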

  val firstInsertIdx = Mux(enbufferSelReg, evenInsertIdx, oddInsertIdx) // slow to generate, for debug only
  val secondInsertIdx = Mux(sameTag,
    firstInsertIdx,
    Mux(~enbufferSelReg, evenInsertIdx, oddInsertIdx)
  ) // slow to generate, for debug only
  val firstInsertVec = Mux(enbufferSelReg, evenInsertVec, oddInsertVec)
  val secondInsertVec = Mux(sameTag,
    firstInsertVec,
    Mux(~enbufferSelReg, evenInsertVec, oddInsertVec)
  ) // slow to generate, for debug only
  val firstCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(enbufferSelReg, evenCanInsert, oddCanInsert)
  val secondCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(sameTag,
    firstCanInsert,
    Mux(~enbufferSelReg, evenCanInsert, oddCanInsert)
  ) && (EnsbufferWidth >= 1).B
  val forward_need_uarch_drain = WireInit(false.B)
  val merge_need_uarch_drain = WireInit(false.B)
  val do_uarch_drain = RegNext(forward_need_uarch_drain) || RegNext(RegNext(merge_need_uarch_drain))
  XSPerfAccumulate("do_uarch_drain", do_uarch_drain)

  io.in(0).ready := firstCanInsert
  io.in(1).ready := secondCanInsert && !sameWord && io.in(0).ready

  def wordReqToBufLine( // allocate a new line in sbuffer
    req: DCacheWordReq,
    reqptag: UInt,
    reqvtag: UInt,
    insertIdx: UInt,
    insertVec: UInt,
    wordOffset: UInt,
    flushMask: Bool
  ): Unit = {
    assert(UIntToOH(insertIdx) === insertVec)
    val sameBlockInflightMask = genSameBlockInflightMask(reqptag)
    (0 until StoreBufferSize).map(entryIdx => {
      when(insertVec(entryIdx)){
        stateVec(entryIdx).state_valid := true.B
        stateVec(entryIdx).w_sameblock_inflight := sameBlockInflightMask.orR // set w_sameblock_inflight when a line is first allocated
        when(sameBlockInflightMask.orR){
          waitInflightMask(entryIdx) := sameBlockInflightMask
        }
        cohCount(entryIdx) := 0.U
        // missqReplayCount(insertIdx) := 0.U
        ptag(entryIdx) := reqptag
        vtag(entryIdx) := reqvtag // update vtag iff a new sbuffer line is allocated
      }
    })
  }

  def mergeWordReq( // merge write req into an existing line
    req: DCacheWordReq,
    reqptag: UInt,
    reqvtag: UInt,
    mergeIdx: UInt,
    mergeVec: UInt,
    wordOffset: UInt
  ): Unit = {
    assert(UIntToOH(mergeIdx) === mergeVec)
    (0 until StoreBufferSize).map(entryIdx => {
      when(mergeVec(entryIdx)) {
        cohCount(entryIdx) := 0.U
        // missqReplayCount(entryIdx) := 0.U
        // check if vtag is the same; if not, trigger a sbuffer flush
        when(reqvtag =/= vtag(entryIdx)) {
          XSDebug("reqvtag =/= sbufvtag req(vtag %x ptag %x) sbuffer(vtag %x ptag %x)\n",
            reqvtag << OffsetWidth,
            reqptag << OffsetWidth,
            vtag(entryIdx) << OffsetWidth,
            ptag(entryIdx) << OffsetWidth
          )
          merge_need_uarch_drain := true.B
        }
      }
    })
  }
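
  // Rationale for draining on a vtag mismatch: the sbuffer tracks one vtag per line for
  // load-forwarding lookups, but two different virtual pages may map to the same
  // physical line (a virtual alias). Rather than tracking multiple vtags per line, the
  // conservative response is to flush the sbuffer.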

  for(((in, wordOffset), i) <- io.in.zip(Seq(firstWord, secondWord)).zipWithIndex){
    writeReq(i).valid := in.fire()
    writeReq(i).bits.wordOffset := wordOffset
    writeReq(i).bits.mask := in.bits.mask
    writeReq(i).bits.data := in.bits.data
    writeReq(i).bits.wline := in.bits.wline
    writeReq(i).bits.cleanMask := false.B
    val debug_insertIdx = if(i == 0) firstInsertIdx else secondInsertIdx
    val insertVec = if(i == 0) firstInsertVec else secondInsertVec
    assert(!((PopCount(insertVec) > 1.U) && in.fire()))
    val insertIdx = OHToUInt(insertVec)
    val flushMask = if(i == 0) true.B else !sameTag
    accessIdx(i).valid := RegNext(in.fire())
    accessIdx(i).bits := RegNext(Mux(canMerge(i), mergeIdx(i), insertIdx))
    when(in.fire()){
      when(canMerge(i)){
        writeReq(i).bits.wvec := mergeVec(i)
        mergeWordReq(in.bits, inptags(i), invtags(i), mergeIdx(i), mergeVec(i), wordOffset)
        XSDebug(p"merge req $i to line [${mergeIdx(i)}]\n")
      }.otherwise({
        writeReq(i).bits.wvec := insertVec
        writeReq(i).bits.cleanMask := flushMask
        wordReqToBufLine(in.bits, inptags(i), invtags(i), insertIdx, insertVec, wordOffset, flushMask)
        XSDebug(p"insert req $i to line[$insertIdx]\n")
        assert(debug_insertIdx === insertIdx)
      })
    }
  }


  for(i <- 0 until StoreBufferSize){
    XSDebug(stateVec(i).isValid(),
      p"[$i] timeout:${cohCount(i)(EvictCountBits-1)} state:${stateVec(i)}\n"
    )
  }

  for((req, i) <- io.in.zipWithIndex){
    XSDebug(req.fire(),
      p"accept req [$i]: " +
        p"addr:${Hexadecimal(req.bits.addr)} " +
        p"mask:${Binary(req.bits.mask)} " +
        p"data:${Hexadecimal(req.bits.data)}\n"
    )
    XSDebug(req.valid && !req.ready,
      p"req [$i] blocked by sbuffer\n"
    )
  }

  // ---------------------- Send Dcache Req ---------------------

  val sbuffer_empty = Cat(invalidMask).andR()
  val sq_empty = !Cat(io.in.map(_.valid)).orR()
  val empty = sbuffer_empty && sq_empty
  val threshold = RegNext(io.csrCtrl.sbuffer_threshold +& 1.U)
  val validCount = PopCount(activeMask)
  val do_eviction = RegNext(validCount >= threshold || validCount === (StoreBufferSize-1).U, init = false.B)
  require((StoreBufferThreshold + 1) <= StoreBufferSize)

  XSDebug(p"validCount[$validCount]\n")

  io.flush.empty := RegNext(empty && io.sqempty)
  // lru.io.flush := sbuffer_state === x_drain_all && empty
  switch(sbuffer_state){
    is(x_idle){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(do_uarch_drain){
        sbuffer_state := x_drain_sbuffer
      }.elsewhen(do_eviction){
        sbuffer_state := x_replace
      }
    }
    is(x_drain_all){
      when(empty){
        sbuffer_state := x_idle
      }
    }
    is(x_drain_sbuffer){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(sbuffer_empty){
        sbuffer_state := x_idle
      }
    }
    is(x_replace){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(do_uarch_drain){
        sbuffer_state := x_drain_sbuffer
      }.elsewhen(!do_eviction){
        sbuffer_state := x_idle
      }
    }
  }
  XSDebug(p"sbuffer state:${sbuffer_state} do eviction:${do_eviction} empty:${empty}\n")

  def noSameBlockInflight(idx: UInt): Bool = {
    // stateVec(idx) itself must not be s_inflight
    !Cat(widthMap(i => inflightMask(i) && ptag(idx) === ptag(i))).orR()
  }

  def genSameBlockInflightMask(ptag_in: UInt): UInt = {
    val mask = VecInit(widthMap(i => inflightMask(i) && ptag_in === ptag(i))).asUInt // quite slow, use it with care
    assert(!(PopCount(mask) > 1.U))
    mask
  }

  def haveSameBlockInflight(ptag_in: UInt): Bool = {
    genSameBlockInflightMask(ptag_in).orR
  }
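
  // The PopCount assertion in genSameBlockInflightMask encodes an invariant: at most one
  // sbuffer entry per physical line can be inflight to dcache at any time (eviction of a
  // line is blocked while a same-block req is outstanding), so the match mask is either
  // zero or one-hot and can safely be captured into waitInflightMask.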

  // ---------------------------------------------------------------------------
  // sbuffer to dcache pipeline
  // ---------------------------------------------------------------------------

  // Now sbuffer deq logic is divided into 2 stages:

  // sbuffer_out_s0:
  // * read data and meta from sbuffer
  // * RegNext() them
  // * set line state to inflight

  // sbuffer_out_s1:
  // * send write req to dcache

  // sbuffer_out_extra:
  // * receive write result from dcache
  // * update line state

  val sbuffer_out_s1_ready = Wire(Bool())

  // ---------------------------------------------------------------------------
  // sbuffer_out_s0
  // ---------------------------------------------------------------------------

  val need_drain = needDrain(sbuffer_state)
  val need_replace = do_eviction || (sbuffer_state === x_replace)
  val sbuffer_out_s0_evictionIdx = Mux(missqReplayHasTimeOut,
    missqReplayTimeOutIdx,
    Mux(need_drain,
      drainIdx,
      Mux(cohHasTimeOut, cohTimeOutIdx, replaceIdx)
    )
  )

  // If there is an inflight dcache req with the same ptag as sbuffer_out_s0_evictionIdx's,
  // the current eviction should be blocked.
  val sbuffer_out_s0_valid = missqReplayHasTimeOut ||
    stateVec(sbuffer_out_s0_evictionIdx).isDcacheReqCandidate() &&
    (need_drain || cohHasTimeOut || need_replace)
  assert(!(
    stateVec(sbuffer_out_s0_evictionIdx).isDcacheReqCandidate &&
    !noSameBlockInflight(sbuffer_out_s0_evictionIdx)
  ))
  val sbuffer_out_s0_cango = sbuffer_out_s1_ready
  sbuffer_out_s0_fire := sbuffer_out_s0_valid && sbuffer_out_s0_cango

  // ---------------------------------------------------------------------------
  // sbuffer_out_s1
  // ---------------------------------------------------------------------------

  // TODO: use EnsbufferWidth
  val shouldWaitWriteFinish = RegNext(VecInit((0 until EnsbufferWidth).map{i =>
    (writeReq(i).bits.wvec.asUInt & UIntToOH(sbuffer_out_s0_evictionIdx).asUInt).orR &&
    writeReq(i).valid
  }).asUInt.orR)
  // block dcache write if read / write hazard
  val blockDcacheWrite = shouldWaitWriteFinish

  val sbuffer_out_s1_valid = RegInit(false.B)
  sbuffer_out_s1_ready := io.dcache.req.ready && !blockDcacheWrite || !sbuffer_out_s1_valid
  val sbuffer_out_s1_fire = io.dcache.req.fire()

  // when sbuffer_out_s1_fire, send dcache req stored in pipeline reg to dcache
  when(sbuffer_out_s1_fire){
    sbuffer_out_s1_valid := false.B
  }
  // when sbuffer_out_s0_fire, read dcache req data and store them in a pipeline reg
  when(sbuffer_out_s0_cango){
    sbuffer_out_s1_valid := sbuffer_out_s0_valid
  }
  when(sbuffer_out_s0_fire){
    stateVec(sbuffer_out_s0_evictionIdx).state_inflight := true.B
    stateVec(sbuffer_out_s0_evictionIdx).w_timeout := false.B
    // stateVec(sbuffer_out_s0_evictionIdx).s_pipe_req := true.B
    XSDebug(p"$sbuffer_out_s0_evictionIdx will be sent to Dcache\n")
  }

  XSDebug(p"need drain:$need_drain cohHasTimeOut: $cohHasTimeOut need replace:$need_replace\n")
  XSDebug(p"drainIdx:$drainIdx tIdx:$cohTimeOutIdx replIdx:$replaceIdx " +
    p"blocked:${!noSameBlockInflight(sbuffer_out_s0_evictionIdx)} v:${activeMask(sbuffer_out_s0_evictionIdx)}\n")
  XSDebug(p"sbuffer_out_s0_valid:$sbuffer_out_s0_valid evictIdx:$sbuffer_out_s0_evictionIdx dcache ready:${io.dcache.req.ready}\n")
  // Note: if other dcache reqs in the same block are inflight,
  // the lru update may not be accurate
  accessIdx(EnsbufferWidth).valid := invalidMask(replaceIdx) || (
    need_replace && !need_drain && !cohHasTimeOut && !missqReplayHasTimeOut && sbuffer_out_s0_cango && activeMask(replaceIdx))
  accessIdx(EnsbufferWidth).bits := replaceIdx
  val sbuffer_out_s1_evictionIdx = RegEnable(sbuffer_out_s0_evictionIdx, enable = sbuffer_out_s0_fire)
  val sbuffer_out_s1_evictionPTag = RegEnable(ptag(sbuffer_out_s0_evictionIdx), enable = sbuffer_out_s0_fire)
  val sbuffer_out_s1_evictionVTag = RegEnable(vtag(sbuffer_out_s0_evictionIdx), enable = sbuffer_out_s0_fire)

  io.dcache.req.valid := sbuffer_out_s1_valid && !blockDcacheWrite
  io.dcache.req.bits := DontCare
  io.dcache.req.bits.cmd   := MemoryOpConstants.M_XWR
  io.dcache.req.bits.addr  := getAddr(sbuffer_out_s1_evictionPTag)
  io.dcache.req.bits.vaddr := getAddr(sbuffer_out_s1_evictionVTag)
  io.dcache.req.bits.data  := data(sbuffer_out_s1_evictionIdx).asUInt
  io.dcache.req.bits.mask  := mask(sbuffer_out_s1_evictionIdx).asUInt
  io.dcache.req.bits.id := sbuffer_out_s1_evictionIdx

  when (sbuffer_out_s1_fire) {
    assert(!(io.dcache.req.bits.vaddr === 0.U))
    assert(!(io.dcache.req.bits.addr === 0.U))
  }

  XSDebug(sbuffer_out_s1_fire,
    p"send buf [$sbuffer_out_s1_evictionIdx] to Dcache, req fire\n"
  )

  // update sbuffer status according to dcache resp source

  def id_to_sbuffer_id(id: UInt): UInt = {
    require(id.getWidth >= log2Up(StoreBufferSize))
    id(log2Up(StoreBufferSize)-1, 0)
  }
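
  // The dcache resp id field may be wider than SbufferIndexWidth. Since req.bits.id is
  // driven with the (zero-extended) evicted entry index above, truncating to the low
  // log2Up(StoreBufferSize) bits recovers the originating entry; e.g. with
  // StoreBufferSize = 16, a 6-bit id of 0x09 maps back to entry 9.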

  // hit resp
  io.dcache.hit_resps.map(resp => {
    val dcache_resp_id = resp.bits.id
    when (resp.fire()) {
      stateVec(dcache_resp_id).state_inflight := false.B
      stateVec(dcache_resp_id).state_valid := false.B
      assert(!resp.bits.replay)
      assert(!resp.bits.miss) // no need to resp on miss, to be optimized
      assert(stateVec(dcache_resp_id).state_inflight === true.B)
    }

    // Updating the w_sameblock_inflight flag is delayed by 1 cycle
    //
    // When a new req allocates a new line in sbuffer, the sameblock_inflight check
    // ignores the current dcache.hit_resps. Then, in the next cycle, we have plenty
    // of time to check whether the same block is still inflight
    (0 until StoreBufferSize).map(i => {
      when(
        stateVec(i).w_sameblock_inflight &&
        stateVec(i).state_valid &&
        RegNext(resp.fire()) &&
        waitInflightMask(i) === UIntToOH(RegNext(id_to_sbuffer_id(dcache_resp_id)))
      ){
        stateVec(i).w_sameblock_inflight := false.B
      }
    })
  })


  // replay resp
  val replay_resp_id = io.dcache.replay_resp.bits.id
  when (io.dcache.replay_resp.fire()) {
    missqReplayCount(replay_resp_id) := 0.U
    stateVec(replay_resp_id).w_timeout := true.B
    // waiting for timeout
    assert(io.dcache.replay_resp.bits.replay)
    assert(stateVec(replay_resp_id).state_inflight === true.B)
  }

  // TODO: reuse cohCount
  (0 until StoreBufferSize).map(i => {
    when(stateVec(i).w_timeout && stateVec(i).state_inflight && !missqReplayCount(i)(MissqReplayCountBits-1)) {
      missqReplayCount(i) := missqReplayCount(i) + 1.U
    }
    when(activeMask(i) && !cohTimeOutMask(i)){
      cohCount(i) := cohCount(i)+1.U
    }
  })

  if (env.EnableDifftest) {
    // hit resp
    io.dcache.hit_resps.zipWithIndex.map{case (resp, index) => {
      val difftest = Module(new DifftestSbufferEvent)
      val dcache_resp_id = resp.bits.id
      difftest.io.clock := clock
      difftest.io.coreid := io.hartId
      difftest.io.index := index.U
      difftest.io.sbufferResp := RegNext(resp.fire())
      difftest.io.sbufferAddr := RegNext(getAddr(ptag(dcache_resp_id)))
      difftest.io.sbufferData := RegNext(data(dcache_resp_id).asTypeOf(Vec(CacheLineBytes, UInt(8.W))))
      difftest.io.sbufferMask := RegNext(mask(dcache_resp_id).asUInt)
    }}
  }

  // ---------------------- Load Data Forward ---------------------
  val mismatch = Wire(Vec(LoadPipelineWidth, Bool()))
  XSPerfAccumulate("vaddr_match_failed", mismatch(0) || mismatch(1))
  for ((forward, i) <- io.forward.zipWithIndex) {
    val vtag_matches = VecInit(widthMap(w => vtag(w) === getVTag(forward.vaddr)))
    // ptag_matches uses paddr from dtlb, which is far from sbuffer
    val ptag_matches = VecInit(widthMap(w => RegEnable(ptag(w), forward.valid) === RegEnable(getPTag(forward.paddr), forward.valid)))
    val tag_matches = vtag_matches
    val tag_mismatch = RegNext(forward.valid) && VecInit(widthMap(w =>
      RegNext(vtag_matches(w)) =/= ptag_matches(w) && RegNext((activeMask(w) || inflightMask(w)))
    )).asUInt.orR
    mismatch(i) := tag_mismatch
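    // Forwarding matches on vtag so the CAM result is available in the same cycle the
    // load's vaddr arrives; the paddr from dtlb comes too late for that. One cycle
    // later the registered ptag comparison is cross-checked against the vtag result,
    // and any disagreement (e.g. a virtual alias) raises matchInvalid and requests a
    // drain instead of risking a forward from the wrong line.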
    when (tag_mismatch) {
      XSDebug("forward tag mismatch: pmatch %x vmatch %x vaddr %x paddr %x\n",
        RegNext(ptag_matches.asUInt),
        RegNext(vtag_matches.asUInt),
        RegNext(forward.vaddr),
        RegNext(forward.paddr)
      )
      forward_need_uarch_drain := true.B
    }
    val valid_tag_matches = widthMap(w => tag_matches(w) && activeMask(w))
    val inflight_tag_matches = widthMap(w => tag_matches(w) && inflightMask(w))
    val line_offset_mask = UIntToOH(getWordOffset(forward.paddr))

    val valid_tag_match_reg = valid_tag_matches.map(RegNext(_))
    val inflight_tag_match_reg = inflight_tag_matches.map(RegNext(_))
    val line_offset_reg = RegNext(line_offset_mask)
    val forward_mask_candidate_reg = RegEnable(
      VecInit(mask.map(entry => entry(getWordOffset(forward.paddr)))),
      forward.valid
    )
    val forward_data_candidate_reg = RegEnable(
      VecInit(data.map(entry => entry(getWordOffset(forward.paddr)))),
      forward.valid
    )

    val selectedValidMask = Mux1H(valid_tag_match_reg, forward_mask_candidate_reg)
    val selectedValidData = Mux1H(valid_tag_match_reg, forward_data_candidate_reg)
    selectedValidMask.suggestName("selectedValidMask_"+i)
    selectedValidData.suggestName("selectedValidData_"+i)

    val selectedInflightMask = Mux1H(inflight_tag_match_reg, forward_mask_candidate_reg)
    val selectedInflightData = Mux1H(inflight_tag_match_reg, forward_data_candidate_reg)
    selectedInflightMask.suggestName("selectedInflightMask_"+i)
    selectedInflightData.suggestName("selectedInflightData_"+i)

    // currently not being used
    val selectedInflightMaskFast = Mux1H(line_offset_mask, Mux1H(inflight_tag_matches, mask).asTypeOf(Vec(CacheLineWords, Vec(DataBytes, Bool()))))
    val selectedValidMaskFast = Mux1H(line_offset_mask, Mux1H(valid_tag_matches, mask).asTypeOf(Vec(CacheLineWords, Vec(DataBytes, Bool()))))

    forward.dataInvalid := false.B // data in store line merge buffer is always ready
    forward.matchInvalid := tag_mismatch // paddr / vaddr cam result does not match
    for (j <- 0 until DataBytes) {
      forward.forwardMask(j) := false.B
      forward.forwardData(j) := DontCare

      // valid entries have higher priority than inflight entries
      when(selectedInflightMask(j)) {
        forward.forwardMask(j) := true.B
        forward.forwardData(j) := selectedInflightData(j)
      }
      when(selectedValidMask(j)) {
        forward.forwardMask(j) := true.B
        forward.forwardData(j) := selectedValidData(j)
      }

      forward.forwardMaskFast(j) := selectedInflightMaskFast(j) || selectedValidMaskFast(j)
    }
  }

  for (i <- 0 until StoreBufferSize) {
    XSDebug("sbf entry " + i + " : ptag %x vtag %x valid %x active %x inflight %x w_timeout %x\n",
      ptag(i) << OffsetWidth,
      vtag(i) << OffsetWidth,
      stateVec(i).isValid(),
      activeMask(i),
      inflightMask(i),
      stateVec(i).w_timeout
    )
  }

  val perf_valid_entry_count = RegNext(PopCount(VecInit(stateVec.map(s => !s.isInvalid())).asUInt))
  XSPerfHistogram("util", perf_valid_entry_count, true.B, 0, StoreBufferSize, 1)
  XSPerfAccumulate("sbuffer_req_valid", PopCount(VecInit(io.in.map(_.valid)).asUInt))
  XSPerfAccumulate("sbuffer_req_fire", PopCount(VecInit(io.in.map(_.fire())).asUInt))
  XSPerfAccumulate("sbuffer_merge", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && canMerge(i)})).asUInt))
  XSPerfAccumulate("sbuffer_newline", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && !canMerge(i)})).asUInt))
  XSPerfAccumulate("dcache_req_valid", io.dcache.req.valid)
  XSPerfAccumulate("dcache_req_fire", io.dcache.req.fire())
  XSPerfAccumulate("sbuffer_idle", sbuffer_state === x_idle)
  XSPerfAccumulate("sbuffer_flush", sbuffer_state === x_drain_sbuffer)
  XSPerfAccumulate("sbuffer_replace", sbuffer_state === x_replace)
  XSPerfAccumulate("evenCanInsert", evenCanInsert)
  XSPerfAccumulate("oddCanInsert", oddCanInsert)
  XSPerfAccumulate("mainpipe_resp_valid", io.dcache.main_pipe_hit_resp.fire())
  XSPerfAccumulate("refill_resp_valid", io.dcache.refill_hit_resp.fire())
  XSPerfAccumulate("replay_resp_valid", io.dcache.replay_resp.fire())
  XSPerfAccumulate("coh_timeout", cohHasTimeOut)

  // val (store_latency_sample, store_latency) = TransactionLatencyCounter(io.lsu.req.fire(), io.lsu.resp.fire())
  // XSPerfHistogram("store_latency", store_latency, store_latency_sample, 0, 100, 10)
  // XSPerfAccumulate("store_req", io.lsu.req.fire())

  val perfEvents = Seq(
    ("sbuffer_req_valid ", PopCount(VecInit(io.in.map(_.valid)).asUInt)                                                                ),
    ("sbuffer_req_fire  ", PopCount(VecInit(io.in.map(_.fire())).asUInt)                                                               ),
    ("sbuffer_merge     ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && canMerge(i)})).asUInt)                ),
    ("sbuffer_newline   ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && !canMerge(i)})).asUInt)               ),
    ("dcache_req_valid  ", io.dcache.req.valid                                                                                         ),
    ("dcache_req_fire   ", io.dcache.req.fire()                                                                                        ),
    ("sbuffer_idle      ", sbuffer_state === x_idle                                                                                    ),
    ("sbuffer_flush     ", sbuffer_state === x_drain_sbuffer                                                                           ),
    ("sbuffer_replace   ", sbuffer_state === x_replace                                                                                 ),
    ("mpipe_resp_valid  ", io.dcache.main_pipe_hit_resp.fire()                                                                         ),
    ("refill_resp_valid ", io.dcache.refill_hit_resp.fire()                                                                            ),
    ("replay_resp_valid ", io.dcache.replay_resp.fire()                                                                                ),
    ("coh_timeout       ", cohHasTimeOut                                                                                               ),
    ("sbuffer_1_4_valid ", (perf_valid_entry_count < (StoreBufferSize.U/4.U))                                                          ),
    ("sbuffer_2_4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/4.U)) & (perf_valid_entry_count <= (StoreBufferSize.U/2.U))    ),
    ("sbuffer_3_4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/2.U)) & (perf_valid_entry_count <= (StoreBufferSize.U*3.U/4.U))),
    ("sbuffer_full_valid", (perf_valid_entry_count > (StoreBufferSize.U*3.U/4.U)))
  )
  generatePerfEvent()

}