xref: /XiangShan/src/main/scala/xiangshan/mem/sbuffer/Sbuffer.scala (revision deb6421e9ab9b7980dc6c429456fc7bd2161357b)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import xiangshan.cache._
import difftest._
import freechips.rocketchip.util._

class SbufferFlushBundle extends Bundle {
  val valid = Output(Bool())
  val empty = Input(Bool())
}

trait HasSbufferConst extends HasXSParameter {
  val EvictCycles = 1 << 20
  val SbufferReplayDelayCycles = 16
  require(isPow2(EvictCycles))
  val EvictCountBits = log2Up(EvictCycles+1)
  val MissqReplayCountBits = log2Up(SbufferReplayDelayCycles) + 1

  // dcache write hit resp has 2 sources
  // refill pipe resp and main pipe resp
  val NumDcacheWriteResp = 2 // hardcoded

  val SbufferIndexWidth: Int = log2Up(StoreBufferSize)
  // paddr = ptag + offset
  val CacheLineBytes: Int = CacheLineSize / 8
  val CacheLineWords: Int = CacheLineBytes / DataBytes
  val OffsetWidth: Int = log2Up(CacheLineBytes)
  val WordsWidth: Int = log2Up(CacheLineWords)
  val PTagWidth: Int = PAddrBits - OffsetWidth
  val VTagWidth: Int = VAddrBits - OffsetWidth
  val WordOffsetWidth: Int = PAddrBits - WordsWidth
}
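
// For intuition, a worked example of the derived constants (hypothetical
// values, assuming CacheLineSize = 512 and a 64-bit data path, DataBytes = 8):
//   CacheLineBytes = 512 / 8    = 64
//   CacheLineWords = 64 / 8     = 8
//   OffsetWidth    = log2Up(64) = 6
//   WordsWidth     = log2Up(8)  = 3
// so a paddr splits into { ptag = paddr(PAddrBits-1, 6), offset = paddr(5, 0) }.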

class SbufferEntryState (implicit p: Parameters) extends SbufferBundle {
  val state_valid    = Bool() // this entry is in use
  val state_inflight = Bool() // sbuffer is trying to write this entry to dcache
  val w_timeout = Bool() // received a replay resp; wait for the resend timer before re-sending the dcache req
  val w_sameblock_inflight = Bool() // a dcache req for the same cache block is inflight

  def isInvalid(): Bool = !state_valid
  def isValid(): Bool = state_valid
  def isActive(): Bool = state_valid && !state_inflight
  def isInflight(): Bool = state_inflight
  def isDcacheReqCandidate(): Bool = state_valid && !state_inflight && !w_sameblock_inflight
}
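
// Informal entry lifecycle, as implemented by the resp handling below:
//   invalid --alloc--> active (valid, !inflight)
//           --sent to dcache--> inflight --hit resp--> invalid
//                                        --replay resp--> inflight + w_timeout
//                                                         --timer MSB set--> resent
// w_sameblock_inflight additionally keeps an active entry from becoming a
// dcache req candidate while an older line of the same block is inflight.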

class SbufferBundle(implicit p: Parameters) extends XSBundle with HasSbufferConst

class DataWriteReq(implicit p: Parameters) extends SbufferBundle {
  // universal writemask
  val wvec = UInt(StoreBufferSize.W)
  // 2 cycle update
  val mask = UInt((DataBits/8).W)
  val data = UInt(DataBits.W)
  val wordOffset = UInt(WordOffsetWidth.W)
  val wline = Bool() // write full cacheline
}

class MaskFlushReq(implicit p: Parameters) extends SbufferBundle {
  // universal writemask
  val wvec = UInt(StoreBufferSize.W)
}

class SbufferData(implicit p: Parameters) extends XSModule with HasSbufferConst {
  val io = IO(new Bundle(){
    // update data and mask when alloc or merge
    val writeReq = Vec(EnsbufferWidth, Flipped(ValidIO(new DataWriteReq)))
    // clean mask when deq
    val maskFlushReq = Vec(NumDcacheWriteResp, Flipped(ValidIO(new MaskFlushReq)))
    val dataOut = Output(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, UInt(8.W)))))
    val maskOut = Output(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool()))))
  })

  val data = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, UInt(8.W)))))
  // val mask = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool()))))
  val mask = RegInit(
    VecInit(Seq.fill(StoreBufferSize)(
      VecInit(Seq.fill(CacheLineWords)(
        VecInit(Seq.fill(DataBytes)(false.B))
      ))
    ))
  )

  // 2 cycle line mask clean
  for(line <- 0 until StoreBufferSize){
    val line_mask_clean_flag = RegNext(
      io.maskFlushReq.map(a => a.valid && a.bits.wvec(line)).reduce(_ || _)
    )
    line_mask_clean_flag.suggestName("line_mask_clean_flag_"+line)
    when(line_mask_clean_flag){
      for(word <- 0 until CacheLineWords){
        for(byte <- 0 until DataBytes){
          mask(line)(word)(byte) := false.B
        }
      }
    }
  }
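
  // Note: the data / mask update below is emitted after this clean logic, so
  // when a line is flushed and re-written in the same cycle, Chisel
  // last-connect semantics let the write win for the bytes being written.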

  // 2 cycle data / mask update
  for(i <- 0 until EnsbufferWidth) {
    val req = io.writeReq(i)
    for(line <- 0 until StoreBufferSize){
      val sbuffer_in_s1_line_wen = req.valid && req.bits.wvec(line)
      val sbuffer_in_s2_line_wen = RegNext(sbuffer_in_s1_line_wen)
      val line_write_buffer_data = RegEnable(req.bits.data, sbuffer_in_s1_line_wen)
      val line_write_buffer_wline = RegEnable(req.bits.wline, sbuffer_in_s1_line_wen)
      val line_write_buffer_mask = RegEnable(req.bits.mask, sbuffer_in_s1_line_wen)
      val line_write_buffer_offset = RegEnable(req.bits.wordOffset(WordsWidth-1, 0), sbuffer_in_s1_line_wen)
      sbuffer_in_s1_line_wen.suggestName("sbuffer_in_s1_line_wen_"+line)
      sbuffer_in_s2_line_wen.suggestName("sbuffer_in_s2_line_wen_"+line)
      line_write_buffer_data.suggestName("line_write_buffer_data_"+line)
      line_write_buffer_wline.suggestName("line_write_buffer_wline_"+line)
      line_write_buffer_mask.suggestName("line_write_buffer_mask_"+line)
      line_write_buffer_offset.suggestName("line_write_buffer_offset_"+line)
      for(word <- 0 until CacheLineWords){
        for(byte <- 0 until DataBytes){
          val write_byte = sbuffer_in_s2_line_wen && (
            line_write_buffer_mask(byte) && (line_write_buffer_offset === word.U) ||
            line_write_buffer_wline
          )
          when(write_byte){
            data(line)(word)(byte) := line_write_buffer_data(byte*8+7, byte*8)
            mask(line)(word)(byte) := true.B
          }
        }
      }
    }
  }
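
  // Worked example (hypothetical values): a store to word 2 with mask 0x0f
  // sets data/mask bytes 0..3 of word 2 in s2; a wline request instead writes
  // every word of the line, replicating the same DataBits of data per word.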

  // 1 cycle line mask clean
  // for(i <- 0 until EnsbufferWidth) {
  //   val req = io.writeReq(i)
  //   when(req.valid){
  //     for(line <- 0 until StoreBufferSize){
  //       when(
  //         req.bits.wvec(line) &&
  //         req.bits.cleanMask
  //       ){
  //         for(word <- 0 until CacheLineWords){
  //           for(byte <- 0 until DataBytes){
  //             mask(line)(word)(byte) := false.B
  //             val debug_last_cycle_write_byte = RegNext(req.valid && req.bits.wvec(line) && (
  //               req.bits.mask(byte) && (req.bits.wordOffset(WordsWidth-1, 0) === word.U) ||
  //               req.bits.wline
  //             ))
  //             assert(!debug_last_cycle_write_byte)
  //           }
  //         }
  //       }
  //     }
  //   }
  // }

  io.dataOut := data
  io.maskOut := mask
}

class Sbuffer(implicit p: Parameters) extends DCacheModule with HasSbufferConst with HasPerfEvents {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(8.W))
    val in = Vec(EnsbufferWidth, Flipped(Decoupled(new DCacheWordReqWithVaddr)))  // TODO: store logic only supports EnsbufferWidth == 2 for now
    val dcache = Flipped(new DCacheToSbufferIO)
    val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val sqempty = Input(Bool())
    val flush = Flipped(new SbufferFlushBundle)
    val csrCtrl = Flipped(new CustomCSRCtrlIO)
  })

  val dataModule = Module(new SbufferData)
  dataModule.io.writeReq <> DontCare
  val writeReq = dataModule.io.writeReq

  val ptag = Reg(Vec(StoreBufferSize, UInt(PTagWidth.W)))
  val vtag = Reg(Vec(StoreBufferSize, UInt(VTagWidth.W)))
  val debug_mask = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool()))))
  val waitInflightMask = Reg(Vec(StoreBufferSize, UInt(StoreBufferSize.W)))
  val data = dataModule.io.dataOut
  val mask = dataModule.io.maskOut
  val stateVec = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U.asTypeOf(new SbufferEntryState))))
  val cohCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(EvictCountBits.W))))
  val missqReplayCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(MissqReplayCountBits.W))))

  val sbuffer_out_s0_fire = Wire(Bool())

  /*
       idle --[flush]   --> drain   --[buf empty]--> idle
            --[buf full]--> replace --[dcache resp]--> idle
  */
  // x_drain_all: drain store queue and sbuffer
  // x_drain_sbuffer: drain sbuffer only, block store queue to sbuffer write
  val x_idle :: x_replace :: x_drain_all :: x_drain_sbuffer :: Nil = Enum(4)
  def needDrain(state: UInt): Bool =
    state(1)
  val sbuffer_state = RegInit(x_idle)

  // ---------------------- Store Enq Sbuffer ---------------------

  def getPTag(pa: UInt): UInt =
    pa(PAddrBits - 1, PAddrBits - PTagWidth)

  def getVTag(va: UInt): UInt =
    va(VAddrBits - 1, VAddrBits - VTagWidth)

  def getWord(pa: UInt): UInt =
    pa(PAddrBits-1, 3)

  def getWordOffset(pa: UInt): UInt =
    pa(OffsetWidth-1, 3)

  def getAddr(ptag: UInt): UInt =
    Cat(ptag, 0.U((PAddrBits - PTagWidth).W))

  def getByteOffset(offset: UInt): UInt =
    Cat(offset(OffsetWidth - 1, 3), 0.U(3.W))

  def isOneOf(key: UInt, seq: Seq[UInt]): Bool =
    if(seq.isEmpty) false.B else Cat(seq.map(_===key)).orR()

  def widthMap[T <: Data](f: Int => T) = (0 until StoreBufferSize) map f

  // sbuffer entry count

  val plru = new PseudoLRU(StoreBufferSize)
  val accessIdx = Wire(Vec(EnsbufferWidth + 1, Valid(UInt(SbufferIndexWidth.W))))

  val replaceIdx = plru.way
  val replaceIdxOH = UIntToOH(plru.way)
  plru.access(accessIdx)
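
  // The PLRU tree is touched by every enqueue (EnsbufferWidth ports) plus one
  // extra port for the replace pointer, so recently written lines tend to be
  // the last ones picked as eviction victims.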

  //-------------------------cohCount-----------------------------
  // insert and merge: cohCount=0
  // every cycle cohCount+=1
  // if cohCount(EvictCountBits-1)==1, evict
  val cohTimeOutMask = VecInit(widthMap(i => cohCount(i)(EvictCountBits - 1) && stateVec(i).isActive()))
  val (cohTimeOutIdx, cohHasTimeOut) = PriorityEncoderWithFlag(cohTimeOutMask)
  val cohTimeOutOH = PriorityEncoderOH(cohTimeOutMask)
  val missqReplayTimeOutMask = VecInit(widthMap(i => missqReplayCount(i)(MissqReplayCountBits - 1) && stateVec(i).w_timeout))
  val (missqReplayTimeOutIdxGen, missqReplayHasTimeOutGen) = PriorityEncoderWithFlag(missqReplayTimeOutMask)
  val missqReplayHasTimeOut = RegNext(missqReplayHasTimeOutGen) && !RegNext(sbuffer_out_s0_fire)
  val missqReplayTimeOutIdx = RegEnable(missqReplayTimeOutIdxGen, missqReplayHasTimeOutGen)
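
  // For example, with EvictCycles = 1 << 20, an active line that has not been
  // merged into for 2^20 cycles is forcibly written back (MSB of cohCount);
  // with SbufferReplayDelayCycles = 16, a replayed line waits 16 cycles (MSB
  // of missqReplayCount) before it becomes resend-eligible.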

  //-------------------------sbuffer enqueue-----------------------------

  // Now sbuffer enq logic is divided into 3 stages:

  // sbuffer_in_s0:
  // * read data and meta from store queue
  // * store them in a 2-entry FIFO queue

  // sbuffer_in_s1:
  // * read data and meta from the FIFO queue
  // * update sbuffer meta (vtag, ptag, flag)
  // * prevent that line from being sent to dcache (add a block condition)
  // * prepare cacheline level write enable signal, RegNext() data and mask

  // sbuffer_in_s2:
  // * use cacheline level buffer to update sbuffer data and mask
  // * remove dcache write block (if any)
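
  // Timing sketch: a store accepted in sbuffer_in_s1 updates meta (ptag/vtag/
  // state) immediately, but its data and mask land one cycle later in s2,
  // which is why an eviction of that line is blocked until the buffered write
  // drains (see shouldWaitWriteFinish below).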

  val activeMask = VecInit(stateVec.map(s => s.isActive()))
  val drainIdx = PriorityEncoder(activeMask)

  val inflightMask = VecInit(stateVec.map(s => s.isInflight()))

  val inptags = io.in.map(in => getPTag(in.bits.addr))
  val invtags = io.in.map(in => getVTag(in.bits.vaddr))
  val sameTag = inptags(0) === inptags(1)
  val firstWord = getWord(io.in(0).bits.addr)
  val secondWord = getWord(io.in(1).bits.addr)
  val sameWord = firstWord === secondWord

  // merge condition
  val mergeMask = Wire(Vec(EnsbufferWidth, Vec(StoreBufferSize, Bool())))
  val mergeIdx = mergeMask.map(PriorityEncoder(_)) // avoid using mergeIdx for better timing
  val canMerge = mergeMask.map(ParallelOR(_))
  val mergeVec = mergeMask.map(_.asUInt)

  for(i <- 0 until EnsbufferWidth){
    mergeMask(i) := widthMap(j =>
      inptags(i) === ptag(j) && activeMask(j)
    )
    assert(!(PopCount(mergeMask(i).asUInt) > 1.U && io.in(i).fire()))
  }

  // insert condition
  // firstInsert: the first invalid entry
  // if the first entry can merge, or the second entry shares the first entry's ptag,
  // secondInsert equals the first invalid entry; otherwise, it is the second invalid entry
  val invalidMask = VecInit(stateVec.map(s => s.isInvalid()))
  val evenInvalidMask = GetEvenBits(invalidMask.asUInt)
  val oddInvalidMask = GetOddBits(invalidMask.asUInt)

  def getFirstOneOH(input: UInt): UInt = {
    assert(input.getWidth > 1)
    val output = WireInit(VecInit(input.asBools))
    (1 until input.getWidth).map(i => {
      output(i) := !input(i - 1, 0).orR && input(i)
    })
    output.asUInt
  }
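
  // e.g. getFirstOneOH("b0110".U) === "b0010".U: a one-hot of the lowest set
  // bit, equivalent to PriorityEncoderOH.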

  val evenRawInsertVec = getFirstOneOH(evenInvalidMask)
  val oddRawInsertVec = getFirstOneOH(oddInvalidMask)
  val (evenRawInsertIdx, evenCanInsert) = PriorityEncoderWithFlag(evenInvalidMask)
  val (oddRawInsertIdx, oddCanInsert) = PriorityEncoderWithFlag(oddInvalidMask)
  val evenInsertIdx = Cat(evenRawInsertIdx, 0.U(1.W)) // slow to generate, for debug only
  val oddInsertIdx = Cat(oddRawInsertIdx, 1.U(1.W)) // slow to generate, for debug only
  val evenInsertVec = GetEvenBits.reverse(evenRawInsertVec)
  val oddInsertVec = GetOddBits.reverse(oddRawInsertVec)
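
  // Free-entry search is banked: GetEvenBits/GetOddBits halve the width seen
  // by each priority encoder, and .reverse scatters the per-bank one-hot
  // result back onto the full StoreBufferSize entry positions.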

  val enbufferSelReg = RegInit(false.B)
  when(io.in(0).valid) {
    enbufferSelReg := ~enbufferSelReg
  }
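
  // enbufferSelReg ping-pongs whenever a store arrives on port 0, so
  // allocations alternate between the even and odd banks, balancing
  // occupancy between them.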

  val firstInsertIdx = Mux(enbufferSelReg, evenInsertIdx, oddInsertIdx) // slow to generate, for debug only
  val secondInsertIdx = Mux(sameTag,
    firstInsertIdx,
    Mux(~enbufferSelReg, evenInsertIdx, oddInsertIdx)
  ) // slow to generate, for debug only
  val firstInsertVec = Mux(enbufferSelReg, evenInsertVec, oddInsertVec)
  val secondInsertVec = Mux(sameTag,
    firstInsertVec,
    Mux(~enbufferSelReg, evenInsertVec, oddInsertVec)
  ) // slow to generate, for debug only
  val firstCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(enbufferSelReg, evenCanInsert, oddCanInsert)
  val secondCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(sameTag,
    firstCanInsert,
    Mux(~enbufferSelReg, evenCanInsert, oddCanInsert)
  ) && (EnsbufferWidth >= 1).B
  val forward_need_uarch_drain = WireInit(false.B)
  val merge_need_uarch_drain = WireInit(false.B)
  val do_uarch_drain = RegNext(forward_need_uarch_drain) || RegNext(RegNext(merge_need_uarch_drain))
  XSPerfAccumulate("do_uarch_drain", do_uarch_drain)

  io.in(0).ready := firstCanInsert
  io.in(1).ready := secondCanInsert && !sameWord && io.in(0).ready

  def wordReqToBufLine( // allocate a new line in sbuffer
    req: DCacheWordReq,
    reqptag: UInt,
    reqvtag: UInt,
    insertIdx: UInt,
    insertVec: UInt,
    wordOffset: UInt
  ): Unit = {
    assert(UIntToOH(insertIdx) === insertVec)
    val sameBlockInflightMask = genSameBlockInflightMask(reqptag)
    (0 until StoreBufferSize).map(entryIdx => {
      when(insertVec(entryIdx)){
        stateVec(entryIdx).state_valid := true.B
        stateVec(entryIdx).w_sameblock_inflight := sameBlockInflightMask.orR // set w_sameblock_inflight when a line is first allocated
        when(sameBlockInflightMask.orR){
          waitInflightMask(entryIdx) := sameBlockInflightMask
        }
        cohCount(entryIdx) := 0.U
        // missqReplayCount(insertIdx) := 0.U
        ptag(entryIdx) := reqptag
        vtag(entryIdx) := reqvtag // update vtag iff a new sbuffer line is allocated
      }
    })
  }

  def mergeWordReq( // merge write req into an existing line
    req: DCacheWordReq,
    reqptag: UInt,
    reqvtag: UInt,
    mergeIdx: UInt,
    mergeVec: UInt,
    wordOffset: UInt
  ): Unit = {
    assert(UIntToOH(mergeIdx) === mergeVec)
    (0 until StoreBufferSize).map(entryIdx => {
      when(mergeVec(entryIdx)) {
        cohCount(entryIdx) := 0.U
        // missqReplayCount(entryIdx) := 0.U
        // check if vtag is the same, if not, trigger sbuffer flush
        when(reqvtag =/= vtag(entryIdx)) {
          XSDebug("reqvtag =/= sbufvtag req(vtag %x ptag %x) sbuffer(vtag %x ptag %x)\n",
            reqvtag << OffsetWidth,
            reqptag << OffsetWidth,
            vtag(entryIdx) << OffsetWidth,
            ptag(entryIdx) << OffsetWidth
          )
          merge_need_uarch_drain := true.B
        }
      }
    })
  }

  for(((in, wordOffset), i) <- io.in.zip(Seq(firstWord, secondWord)).zipWithIndex){
    writeReq(i).valid := in.fire()
    writeReq(i).bits.wordOffset := wordOffset
    writeReq(i).bits.mask := in.bits.mask
    writeReq(i).bits.data := in.bits.data
    writeReq(i).bits.wline := in.bits.wline
    val debug_insertIdx = if(i == 0) firstInsertIdx else secondInsertIdx
    val insertVec = if(i == 0) firstInsertVec else secondInsertVec
    assert(!((PopCount(insertVec) > 1.U) && in.fire()))
    val insertIdx = OHToUInt(insertVec)
    accessIdx(i).valid := RegNext(in.fire())
    accessIdx(i).bits := RegNext(Mux(canMerge(i), mergeIdx(i), insertIdx))
    when(in.fire()){
      when(canMerge(i)){
        writeReq(i).bits.wvec := mergeVec(i)
        mergeWordReq(in.bits, inptags(i), invtags(i), mergeIdx(i), mergeVec(i), wordOffset)
        XSDebug(p"merge req $i to line [${mergeIdx(i)}]\n")
      }.otherwise({
        writeReq(i).bits.wvec := insertVec
        wordReqToBufLine(in.bits, inptags(i), invtags(i), insertIdx, insertVec, wordOffset)
        XSDebug(p"insert req $i to line[$insertIdx]\n")
        assert(debug_insertIdx === insertIdx)
      })
    }
  }


  for(i <- 0 until StoreBufferSize){
    XSDebug(stateVec(i).isValid(),
      p"[$i] timeout:${cohCount(i)(EvictCountBits-1)} state:${stateVec(i)}\n"
    )
  }

  for((req, i) <- io.in.zipWithIndex){
    XSDebug(req.fire(),
      p"accept req [$i]: " +
        p"addr:${Hexadecimal(req.bits.addr)} " +
        p"mask:${Binary(req.bits.mask)} " +
        p"data:${Hexadecimal(req.bits.data)}\n"
    )
    XSDebug(req.valid && !req.ready,
      p"req [$i] blocked by sbuffer\n"
    )
  }

  // ---------------------- Send Dcache Req ---------------------

  val sbuffer_empty = Cat(invalidMask).andR()
  val sq_empty = !Cat(io.in.map(_.valid)).orR()
  val empty = sbuffer_empty && sq_empty
  val threshold = RegNext(io.csrCtrl.sbuffer_threshold +& 1.U)
  val validCount = PopCount(activeMask)
  val do_eviction = RegNext(validCount >= threshold || validCount === (StoreBufferSize-1).U, init = false.B)
  require((StoreBufferThreshold + 1) <= StoreBufferSize)

  XSDebug(p"validCount[$validCount]\n")

  io.flush.empty := RegNext(empty && io.sqempty)
  // lru.io.flush := sbuffer_state === x_drain_all && empty
  switch(sbuffer_state){
    is(x_idle){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(do_uarch_drain){
        sbuffer_state := x_drain_sbuffer
      }.elsewhen(do_eviction){
        sbuffer_state := x_replace
      }
    }
    is(x_drain_all){
      when(empty){
        sbuffer_state := x_idle
      }
    }
    is(x_drain_sbuffer){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(sbuffer_empty){
        sbuffer_state := x_idle
      }
    }
    is(x_replace){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(do_uarch_drain){
        sbuffer_state := x_drain_sbuffer
      }.elsewhen(!do_eviction){
        sbuffer_state := x_idle
      }
    }
  }
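
  // Summary: x_drain_all empties both store queue and sbuffer (external
  // flush), x_drain_sbuffer empties the sbuffer only (vtag/ptag mismatch
  // recovery via do_uarch_drain), and x_replace keeps evicting lines while
  // occupancy stays above the CSR-programmed threshold.
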
  XSDebug(p"sbuffer state:${sbuffer_state} do eviction:${do_eviction} empty:${empty}\n")

  def noSameBlockInflight(idx: UInt): Bool = {
    // stateVec(idx) itself must not be s_inflight
    !Cat(widthMap(i => inflightMask(i) && ptag(idx) === ptag(i))).orR()
  }

  def genSameBlockInflightMask(ptag_in: UInt): UInt = {
    val mask = VecInit(widthMap(i => inflightMask(i) && ptag_in === ptag(i))).asUInt // quite slow, use it with care
    assert(!(PopCount(mask) > 1.U))
    mask
  }

  def haveSameBlockInflight(ptag_in: UInt): Bool = {
    genSameBlockInflightMask(ptag_in).orR
  }
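
  // Invariant: at most one inflight dcache req per cache block (asserted in
  // genSameBlockInflightMask). A newer store to an inflight block cannot
  // merge (merge requires an active line), so it allocates a fresh line and
  // parks on w_sameblock_inflight instead.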

  // ---------------------------------------------------------------------------
  // sbuffer to dcache pipeline
  // ---------------------------------------------------------------------------

  // Now sbuffer deq logic is divided into 2 stages (plus an extra resp stage):

  // sbuffer_out_s0:
  // * read data and meta from sbuffer
  // * RegNext() them
  // * set line state to inflight

  // sbuffer_out_s1:
  // * send write req to dcache

  // sbuffer_out_extra:
  // * receive write result from dcache
  // * update line state
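
  // In effect, sbuffer_out_s0/s1 form a 1-deep pipeline: s0 picks a victim
  // line and marks it inflight, s1 holds the latched req until
  // io.dcache.req fires, and the resp paths below retire or replay the entry.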

  val sbuffer_out_s1_ready = Wire(Bool())

  // ---------------------------------------------------------------------------
  // sbuffer_out_s0
  // ---------------------------------------------------------------------------

  val need_drain = needDrain(sbuffer_state)
  val need_replace = do_eviction || (sbuffer_state === x_replace)
  val sbuffer_out_s0_evictionIdx = Mux(missqReplayHasTimeOut,
    missqReplayTimeOutIdx,
    Mux(need_drain,
      drainIdx,
      Mux(cohHasTimeOut, cohTimeOutIdx, replaceIdx)
    )
  )
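
  // Victim selection priority: missq replay timeout > drain scan (drainIdx) >
  // coherence timeout (cohTimeOutIdx) > PLRU replacement (replaceIdx).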

  // If there is an inflight dcache req with the same ptag as
  // sbuffer_out_s0_evictionIdx's, the current eviction must be blocked.
  val sbuffer_out_s0_valid = missqReplayHasTimeOut ||
    stateVec(sbuffer_out_s0_evictionIdx).isDcacheReqCandidate() &&
    (need_drain || cohHasTimeOut || need_replace)
  assert(!(
    stateVec(sbuffer_out_s0_evictionIdx).isDcacheReqCandidate &&
    !noSameBlockInflight(sbuffer_out_s0_evictionIdx)
  ))
  val sbuffer_out_s0_cango = sbuffer_out_s1_ready
  sbuffer_out_s0_fire := sbuffer_out_s0_valid && sbuffer_out_s0_cango

  // ---------------------------------------------------------------------------
  // sbuffer_out_s1
  // ---------------------------------------------------------------------------

  // TODO: use EnsbufferWidth
  val shouldWaitWriteFinish = RegNext(VecInit((0 until EnsbufferWidth).map{i =>
    (writeReq(i).bits.wvec.asUInt & UIntToOH(sbuffer_out_s0_evictionIdx).asUInt).orR &&
    writeReq(i).valid
  }).asUInt.orR)
  // block dcache write if read / write hazard
  val blockDcacheWrite = shouldWaitWriteFinish
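  // If an enqueue is still writing the very line chosen for eviction (its
  // 2-cycle data update is in flight), hold the dcache req for a cycle so
  // the read of data/mask cannot race the write.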

  val sbuffer_out_s1_valid = RegInit(false.B)
  sbuffer_out_s1_ready := io.dcache.req.ready && !blockDcacheWrite || !sbuffer_out_s1_valid
  val sbuffer_out_s1_fire = io.dcache.req.fire()

  // when sbuffer_out_s1_fire, send dcache req stored in pipeline reg to dcache
  when(sbuffer_out_s1_fire){
    sbuffer_out_s1_valid := false.B
  }
  // when sbuffer_out_s0_fire, read dcache req data and store them in a pipeline reg
  when(sbuffer_out_s0_cango){
    sbuffer_out_s1_valid := sbuffer_out_s0_valid
  }
  when(sbuffer_out_s0_fire){
    stateVec(sbuffer_out_s0_evictionIdx).state_inflight := true.B
    stateVec(sbuffer_out_s0_evictionIdx).w_timeout := false.B
    // stateVec(sbuffer_out_s0_evictionIdx).s_pipe_req := true.B
    XSDebug(p"$sbuffer_out_s0_evictionIdx will be sent to Dcache\n")
  }

  XSDebug(p"need drain:$need_drain cohHasTimeOut: $cohHasTimeOut need replace:$need_replace\n")
  XSDebug(p"drainIdx:$drainIdx tIdx:$cohTimeOutIdx replIdx:$replaceIdx " +
    p"blocked:${!noSameBlockInflight(sbuffer_out_s0_evictionIdx)} v:${activeMask(sbuffer_out_s0_evictionIdx)}\n")
  XSDebug(p"sbuffer_out_s0_valid:$sbuffer_out_s0_valid evictIdx:$sbuffer_out_s0_evictionIdx dcache ready:${io.dcache.req.ready}\n")
  // Note: if other dcache reqs in the same block are inflight,
  // the lru update may not be accurate
  accessIdx(EnsbufferWidth).valid := invalidMask(replaceIdx) || (
    need_replace && !need_drain && !cohHasTimeOut && !missqReplayHasTimeOut && sbuffer_out_s0_cango && activeMask(replaceIdx))
  accessIdx(EnsbufferWidth).bits := replaceIdx
  val sbuffer_out_s1_evictionIdx = RegEnable(sbuffer_out_s0_evictionIdx, enable = sbuffer_out_s0_fire)
  val sbuffer_out_s1_evictionPTag = RegEnable(ptag(sbuffer_out_s0_evictionIdx), enable = sbuffer_out_s0_fire)
  val sbuffer_out_s1_evictionVTag = RegEnable(vtag(sbuffer_out_s0_evictionIdx), enable = sbuffer_out_s0_fire)

  io.dcache.req.valid := sbuffer_out_s1_valid && !blockDcacheWrite
  io.dcache.req.bits := DontCare
  io.dcache.req.bits.cmd   := MemoryOpConstants.M_XWR
  io.dcache.req.bits.addr  := getAddr(sbuffer_out_s1_evictionPTag)
  io.dcache.req.bits.vaddr := getAddr(sbuffer_out_s1_evictionVTag)
  io.dcache.req.bits.data  := data(sbuffer_out_s1_evictionIdx).asUInt
  io.dcache.req.bits.mask  := mask(sbuffer_out_s1_evictionIdx).asUInt
  io.dcache.req.bits.id := sbuffer_out_s1_evictionIdx

  when (sbuffer_out_s1_fire) {
    assert(!(io.dcache.req.bits.vaddr === 0.U))
    assert(!(io.dcache.req.bits.addr === 0.U))
  }

  XSDebug(sbuffer_out_s1_fire,
    p"send buf [$sbuffer_out_s1_evictionIdx] to Dcache, req fire\n"
  )

  // update sbuffer status according to dcache resp source

  def id_to_sbuffer_id(id: UInt): UInt = {
    require(id.getWidth >= log2Up(StoreBufferSize))
    id(log2Up(StoreBufferSize)-1, 0)
  }
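
  // e.g. assuming StoreBufferSize = 16 (the value is configuration-dependent),
  // id_to_sbuffer_id truncates a wider dcache req id to its low 4 bits.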

  // hit resp
  io.dcache.hit_resps.map(resp => {
    val dcache_resp_id = resp.bits.id
    when (resp.fire()) {
      stateVec(dcache_resp_id).state_inflight := false.B
      stateVec(dcache_resp_id).state_valid := false.B
      assert(!resp.bits.replay)
      assert(!resp.bits.miss) // no need to resp on a miss, to be optimized
      assert(stateVec(dcache_resp_id).state_inflight === true.B)
    }

    // Updating the w_sameblock_inflight flag is delayed by 1 cycle
    //
    // When a new req allocates a new line in the sbuffer, the sameblock_inflight check
    // ignores the current dcache.hit_resps. Then, in the next cycle, we have plenty
    // of time to check if the same block is still inflight.
    (0 until StoreBufferSize).map(i => {
      when(
        stateVec(i).w_sameblock_inflight &&
        stateVec(i).state_valid &&
        RegNext(resp.fire()) &&
        waitInflightMask(i) === UIntToOH(RegNext(id_to_sbuffer_id(dcache_resp_id)))
      ){
        stateVec(i).w_sameblock_inflight := false.B
      }
    })
  })

  io.dcache.hit_resps.zip(dataModule.io.maskFlushReq).map{case (resp, maskFlush) => {
    maskFlush.valid := resp.fire()
    maskFlush.bits.wvec := UIntToOH(resp.bits.id)
  }}
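
  // A hit resp also flushes the retired line's byte mask (the 2-cycle clean
  // in SbufferData), so a re-allocated line never observes stale mask bits.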

  // replay resp
  val replay_resp_id = io.dcache.replay_resp.bits.id
  when (io.dcache.replay_resp.fire()) {
    missqReplayCount(replay_resp_id) := 0.U
    stateVec(replay_resp_id).w_timeout := true.B
    // waiting for timeout
    assert(io.dcache.replay_resp.bits.replay)
    assert(stateVec(replay_resp_id).state_inflight === true.B)
  }

  // TODO: reuse cohCount
  (0 until StoreBufferSize).map(i => {
    when(stateVec(i).w_timeout && stateVec(i).state_inflight && !missqReplayCount(i)(MissqReplayCountBits-1)) {
      missqReplayCount(i) := missqReplayCount(i) + 1.U
    }
    when(activeMask(i) && !cohTimeOutMask(i)){
      cohCount(i) := cohCount(i)+1.U
    }
  })

  if (env.EnableDifftest) {
    // hit resp
    io.dcache.hit_resps.zipWithIndex.map{case (resp, index) => {
      val difftest = Module(new DifftestSbufferEvent)
      val dcache_resp_id = resp.bits.id
      difftest.io.clock := clock
      difftest.io.coreid := io.hartId
      difftest.io.index := index.U
      difftest.io.sbufferResp := RegNext(resp.fire())
      difftest.io.sbufferAddr := RegNext(getAddr(ptag(dcache_resp_id)))
      difftest.io.sbufferData := RegNext(data(dcache_resp_id).asTypeOf(Vec(CacheLineBytes, UInt(8.W))))
      difftest.io.sbufferMask := RegNext(mask(dcache_resp_id).asUInt)
    }}
  }

  // ---------------------- Load Data Forward ---------------------
  val mismatch = Wire(Vec(LoadPipelineWidth, Bool()))
  XSPerfAccumulate("vaddr_match_failed", mismatch(0) || mismatch(1))
  for ((forward, i) <- io.forward.zipWithIndex) {
    val vtag_matches = VecInit(widthMap(w => vtag(w) === getVTag(forward.vaddr)))
    // ptag_matches uses paddr from dtlb, which is far from sbuffer
    val ptag_matches = VecInit(widthMap(w => RegEnable(ptag(w), forward.valid) === RegEnable(getPTag(forward.paddr), forward.valid)))
    val tag_matches = vtag_matches
    val tag_mismatch = RegNext(forward.valid) && VecInit(widthMap(w =>
      RegNext(vtag_matches(w)) =/= ptag_matches(w) && RegNext((activeMask(w) || inflightMask(w)))
    )).asUInt.orR
    mismatch(i) := tag_mismatch
    when (tag_mismatch) {
      XSDebug("forward tag mismatch: pmatch %x vmatch %x vaddr %x paddr %x\n",
        RegNext(ptag_matches.asUInt),
        RegNext(vtag_matches.asUInt),
        RegNext(forward.vaddr),
        RegNext(forward.paddr)
      )
      forward_need_uarch_drain := true.B
    }
    val valid_tag_matches = widthMap(w => tag_matches(w) && activeMask(w))
    val inflight_tag_matches = widthMap(w => tag_matches(w) && inflightMask(w))
    val line_offset_mask = UIntToOH(getWordOffset(forward.paddr))

    val valid_tag_match_reg = valid_tag_matches.map(RegNext(_))
    val inflight_tag_match_reg = inflight_tag_matches.map(RegNext(_))
    val line_offset_reg = RegNext(line_offset_mask)
    val forward_mask_candidate_reg = RegEnable(
      VecInit(mask.map(entry => entry(getWordOffset(forward.paddr)))),
      forward.valid
    )
    val forward_data_candidate_reg = RegEnable(
      VecInit(data.map(entry => entry(getWordOffset(forward.paddr)))),
      forward.valid
    )

    val selectedValidMask = Mux1H(valid_tag_match_reg, forward_mask_candidate_reg)
    val selectedValidData = Mux1H(valid_tag_match_reg, forward_data_candidate_reg)
    selectedValidMask.suggestName("selectedValidMask_"+i)
    selectedValidData.suggestName("selectedValidData_"+i)

    val selectedInflightMask = Mux1H(inflight_tag_match_reg, forward_mask_candidate_reg)
    val selectedInflightData = Mux1H(inflight_tag_match_reg, forward_data_candidate_reg)
    selectedInflightMask.suggestName("selectedInflightMask_"+i)
    selectedInflightData.suggestName("selectedInflightData_"+i)

    // currently not being used
    val selectedInflightMaskFast = Mux1H(line_offset_mask, Mux1H(inflight_tag_matches, mask).asTypeOf(Vec(CacheLineWords, Vec(DataBytes, Bool()))))
    val selectedValidMaskFast = Mux1H(line_offset_mask, Mux1H(valid_tag_matches, mask).asTypeOf(Vec(CacheLineWords, Vec(DataBytes, Bool()))))

    forward.dataInvalid := false.B // data in store line merge buffer is always ready
    forward.matchInvalid := tag_mismatch // paddr / vaddr cam result does not match
    for (j <- 0 until DataBytes) {
      forward.forwardMask(j) := false.B
      forward.forwardData(j) := DontCare

      // valid entries have higher priority than inflight entries
      when(selectedInflightMask(j)) {
        forward.forwardMask(j) := true.B
        forward.forwardData(j) := selectedInflightData(j)
      }
      when(selectedValidMask(j)) {
        forward.forwardMask(j) := true.B
        forward.forwardData(j) := selectedValidData(j)
      }

      forward.forwardMaskFast(j) := selectedInflightMaskFast(j) || selectedValidMaskFast(j)
    }
  }
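
  // Forwarding example (hypothetical values): if byte j of a block is 0xAB in
  // an inflight line and 0xCD in an active line (a newer store to the same
  // block), 0xCD is forwarded: the valid-entry when() comes last and wins.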

  for (i <- 0 until StoreBufferSize) {
    XSDebug("sbf entry " + i + " : ptag %x vtag %x valid %x active %x inflight %x w_timeout %x\n",
      ptag(i) << OffsetWidth,
      vtag(i) << OffsetWidth,
      stateVec(i).isValid(),
      activeMask(i),
      inflightMask(i),
      stateVec(i).w_timeout
    )
  }

  val perf_valid_entry_count = RegNext(PopCount(VecInit(stateVec.map(s => !s.isInvalid())).asUInt))
  XSPerfHistogram("util", perf_valid_entry_count, true.B, 0, StoreBufferSize, 1)
  XSPerfAccumulate("sbuffer_req_valid", PopCount(VecInit(io.in.map(_.valid)).asUInt))
  XSPerfAccumulate("sbuffer_req_fire", PopCount(VecInit(io.in.map(_.fire())).asUInt))
  XSPerfAccumulate("sbuffer_merge", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && canMerge(i)})).asUInt))
  XSPerfAccumulate("sbuffer_newline", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && !canMerge(i)})).asUInt))
  XSPerfAccumulate("dcache_req_valid", io.dcache.req.valid)
  XSPerfAccumulate("dcache_req_fire", io.dcache.req.fire())
  XSPerfAccumulate("sbuffer_idle", sbuffer_state === x_idle)
  XSPerfAccumulate("sbuffer_flush", sbuffer_state === x_drain_sbuffer)
  XSPerfAccumulate("sbuffer_replace", sbuffer_state === x_replace)
  XSPerfAccumulate("evenCanInsert", evenCanInsert)
  XSPerfAccumulate("oddCanInsert", oddCanInsert)
  XSPerfAccumulate("mainpipe_resp_valid", io.dcache.main_pipe_hit_resp.fire())
  XSPerfAccumulate("refill_resp_valid", io.dcache.refill_hit_resp.fire())
  XSPerfAccumulate("replay_resp_valid", io.dcache.replay_resp.fire())
  XSPerfAccumulate("coh_timeout", cohHasTimeOut)

  // val (store_latency_sample, store_latency) = TransactionLatencyCounter(io.lsu.req.fire(), io.lsu.resp.fire())
  // XSPerfHistogram("store_latency", store_latency, store_latency_sample, 0, 100, 10)
  // XSPerfAccumulate("store_req", io.lsu.req.fire())

  val perfEvents = Seq(
    ("sbuffer_req_valid ", PopCount(VecInit(io.in.map(_.valid)).asUInt)                                                                ),
    ("sbuffer_req_fire  ", PopCount(VecInit(io.in.map(_.fire())).asUInt)                                                               ),
    ("sbuffer_merge     ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && canMerge(i)})).asUInt)                ),
    ("sbuffer_newline   ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && !canMerge(i)})).asUInt)               ),
    ("dcache_req_valid  ", io.dcache.req.valid                                                                                         ),
    ("dcache_req_fire   ", io.dcache.req.fire()                                                                                        ),
    ("sbuffer_idle      ", sbuffer_state === x_idle                                                                                    ),
    ("sbuffer_flush     ", sbuffer_state === x_drain_sbuffer                                                                           ),
    ("sbuffer_replace   ", sbuffer_state === x_replace                                                                                 ),
    ("mpipe_resp_valid  ", io.dcache.main_pipe_hit_resp.fire()                                                                         ),
    ("refill_resp_valid ", io.dcache.refill_hit_resp.fire()                                                                            ),
    ("replay_resp_valid ", io.dcache.replay_resp.fire()                                                                                ),
    ("coh_timeout       ", cohHasTimeOut                                                                                               ),
    ("sbuffer_1_4_valid ", (perf_valid_entry_count < (StoreBufferSize.U/4.U))                                                          ),
    ("sbuffer_2_4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/4.U)) & (perf_valid_entry_count <= (StoreBufferSize.U/2.U))    ),
    ("sbuffer_3_4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/2.U)) & (perf_valid_entry_count <= (StoreBufferSize.U*3.U/4.U))),
    ("sbuffer_full_valid", (perf_valid_entry_count > (StoreBufferSize.U*3.U/4.U)))
  )
  generatePerfEvent()

}