/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.tilelink.ClientStates._
import freechips.rocketchip.tilelink.MemoryOpCategories._
import freechips.rocketchip.tilelink.TLPermissions._
import difftest._
import huancun.{AliasKey, DirtyKey, PreferCacheKey, PrefetchKey}

class MissReq(implicit p: Parameters) extends DCacheBundle {
  val source = UInt(sourceTypeWidth.W)
  val cmd = UInt(M_SZ.W)
  val addr = UInt(PAddrBits.W)
  val vaddr = UInt(VAddrBits.W)
  val way_en = UInt(DCacheWays.W)

  // store
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)

  // which word does an AMO work on?
  val word_idx = UInt(log2Up(blockWords).W)
  val amo_data = UInt(DataBits.W)
  val amo_mask = UInt((DataBits / 8).W)

  val req_coh = new ClientMetadata
  val replace_coh = new ClientMetadata
  val replace_tag = UInt(tagBits.W)
  val id = UInt(reqIdWidth.W)

  // For now, a miss queue entry request is actually valid when req.valid && !cancel.
  // * req.valid is fast to generate
  // * cancel is slow to generate, so it is not used until the last moment
  //
  // cancel may come from the following sources:
  // 1. miss req blocked by writeback queue:
  //      a writeback req of the same address is in progress
  // 2. pmp check failed
  val cancel = Bool() // slow to generate; when set, it cancels req.valid at the last moment

  def isLoad = source === LOAD_SOURCE.U
  def isStore = source === STORE_SOURCE.U
  def isAMO = source === AMO_SOURCE.U
  def hit = req_coh.isValid()
}

class MissEntry(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule {
  val io = IO(new Bundle() {
    // MSHR ID
    val id = Input(UInt(log2Up(cfg.nMissEntries).W))
    // client requests
    val req    = Flipped(ValidIO(new MissReq))
    // allocate this entry for a new req
    val primary_valid = Input(Bool())
    // this entry is free and can be allocated to new reqs
    val primary_ready = Output(Bool())
    // this entry is busy, but it can merge the new req
    val secondary_ready = Output(Bool())
    // this entry is busy and it cannot merge the new req
    val secondary_reject = Output(Bool())

    val refill_to_ldq = ValidIO(new Refill)

    // bus
    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    // refill pipe
    val refill_pipe_req = DecoupledIO(new RefillPipeReq)
    val refill_pipe_resp = Input(Bool())

    // replace pipe
    val replace_pipe_req = DecoupledIO(new MainPipeReq)
    val replace_pipe_resp = Input(Bool())

    // main pipe: amo miss
    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Input(Bool())

    val block_addr = ValidIO(UInt(PAddrBits.W))

    val debug_early_replace = ValidIO(new Bundle() {
      // info about the block that has been replaced
      val idx = UInt(idxBits.W) // vaddr
      val tag = UInt(tagBits.W) // paddr
    })
  })

  assert(!RegNext(io.primary_valid && !io.primary_ready))

  val req = Reg(new MissReq)
  val req_valid = RegInit(false.B)
  val set = addr_to_dcache_set(req.vaddr)

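  // The entry acts as a small transaction scheduler: s_* flags mark requests
  // still to be sent (schedule), w_* flags mark responses still outstanding
  // (wait). All flags reset to true (nothing to do); allocation clears exactly
  // the flags the new request needs.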
  val s_acquire = RegInit(true.B)
  val s_grantack = RegInit(true.B)
  val s_replace_req = RegInit(true.B)
  val s_refill = RegInit(true.B)
  val s_mainpipe_req = RegInit(true.B)

  val w_grantfirst = RegInit(true.B)
  val w_grantlast = RegInit(true.B)
  val w_replace_resp = RegInit(true.B)
  val w_refill_resp = RegInit(true.B)
  val w_mainpipe_resp = RegInit(true.B)

  val release_entry = s_grantack && w_refill_resp && w_mainpipe_resp

  val acquire_not_sent = !s_acquire && !io.mem_acquire.ready
  val data_not_refilled = !w_grantfirst

  val should_refill_data_reg = Reg(Bool())
  val should_refill_data = WireInit(should_refill_data_reg)

  val full_overwrite = req.isStore && req.store_mask.andR

  val (_, _, refill_done, refill_count) = edge.count(io.mem_grant)
  val grant_param = Reg(UInt(TLPermissions.bdWidth.W))

  when (release_entry && req_valid) {
    req_valid := false.B
  }

  val primary_fire = WireInit(io.req.valid && io.primary_ready && io.primary_valid && !io.req.bits.cancel)
  when (primary_fire) {
    req_valid := true.B
    req := io.req.bits
    req.addr := get_block_addr(io.req.bits.addr)

    s_acquire := false.B
    s_grantack := false.B

    w_grantfirst := false.B
    w_grantlast := false.B

    when (!io.req.bits.isAMO) {
      s_refill := false.B
      w_refill_resp := false.B
    }

    when (!io.req.bits.hit && io.req.bits.replace_coh.isValid() && !io.req.bits.isAMO) {
      s_replace_req := false.B
      w_replace_resp := false.B
    }

    when (io.req.bits.isAMO) {
      s_mainpipe_req := false.B
      w_mainpipe_resp := false.B
    }

    should_refill_data_reg := io.req.bits.isLoad
  }

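  // Secondary fire: merge a new request into this busy entry. Only loads and
  // stores can be merged (AMOs are asserted away below); a merged store takes
  // over the request payload, while the replacement decision
  // (way_en / replace_coh / replace_tag) made at allocation is kept.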
  val secondary_fire = WireInit(io.req.valid && io.secondary_ready && !io.req.bits.cancel)
  when (secondary_fire) {
    assert(io.req.bits.req_coh.state <= req.req_coh.state)
    assert(!(io.req.bits.isAMO || req.isAMO))
    // use the most up-to-date meta
    req.req_coh := io.req.bits.req_coh

    when (io.req.bits.isStore) {
      req := io.req.bits
      req.addr := get_block_addr(io.req.bits.addr)
      // the self-connections below preserve the current register values,
      // keeping the replacement decision made at allocation time
      req.way_en := req.way_en
      req.replace_coh := req.replace_coh
      req.replace_tag := req.replace_tag
    }

    should_refill_data := should_refill_data_reg || io.req.bits.isLoad
    should_refill_data_reg := should_refill_data
  }

  when (io.mem_acquire.fire()) {
    s_acquire := true.B
  }

  val refill_data = Reg(Vec(blockRows, UInt(rowBits.W)))
  val refill_data_raw = Reg(Vec(blockBytes / beatBytes, UInt(beatBits.W)))
  val new_data = Wire(Vec(blockRows, UInt(rowBits.W)))
  val new_mask = Wire(Vec(blockRows, UInt(rowBytes.W)))
  def mergePutData(old_data: UInt, new_data: UInt, wmask: UInt): UInt = {
    val full_wmask = FillInterleaved(8, wmask)
    (~full_wmask & old_data | full_wmask & new_data)
  }
  for (i <- 0 until blockRows) {
    new_data(i) := req.store_data(rowBits * (i + 1) - 1, rowBits * i)
    // we only need to merge data for stores
    new_mask(i) := Mux(req.isStore, req.store_mask(rowBytes * (i + 1) - 1, rowBytes * i), 0.U)
  }
  val hasData = RegInit(true.B)
  val isDirty = RegInit(false.B)
  when (io.mem_grant.fire()) {
    w_grantfirst := true.B
    grant_param := io.mem_grant.bits.param
    when (edge.hasData(io.mem_grant.bits)) {
      // GrantData: merge the granted beat with pending store data
      for (i <- 0 until beatRows) {
        val idx = (refill_count << log2Floor(beatRows)) + i.U
        val grant_row = io.mem_grant.bits.data(rowBits * (i + 1) - 1, rowBits * i)
        refill_data(idx) := mergePutData(grant_row, new_data(idx), new_mask(idx))
      }
      w_grantlast := w_grantlast || refill_done
      hasData := true.B
    }.otherwise {
      // Grant (permissions only): the store must overwrite the whole block
      assert(full_overwrite)
      for (i <- 0 until blockRows) {
        refill_data(i) := new_data(i)
      }
      w_grantlast := true.B
      hasData := false.B
    }

    refill_data_raw(refill_count) := io.mem_grant.bits.data
    isDirty := io.mem_grant.bits.echo.lift(DirtyKey).getOrElse(false.B)
  }

  when (io.mem_finish.fire()) {
    s_grantack := true.B
  }

  when (io.replace_pipe_req.fire()) {
    s_replace_req := true.B
  }

  when (io.replace_pipe_resp) {
    w_replace_resp := true.B
  }

  when (io.refill_pipe_req.fire()) {
    s_refill := true.B
  }

  when (io.refill_pipe_resp) {
    w_refill_resp := true.B
  }

  when (io.main_pipe_req.fire()) {
    s_mainpipe_req := true.B
  }

  when (io.main_pipe_resp) {
    w_mainpipe_resp := true.B
  }

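  // Merge/reject policy:
  // * before the Acquire has been accepted on channel A, a load entry can
  //   absorb another load or store to the same block;
  // * before any data beat has been refilled, a new load can still merge into
  //   a load or store entry;
  // * any other request to the same block is rejected, as is any request whose
  //   replacement would reuse this entry's (set, way).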
  def before_read_sent_can_merge(new_req: MissReq): Bool = {
    acquire_not_sent && req.isLoad && (new_req.isLoad || new_req.isStore)
  }

  def before_data_refill_can_merge(new_req: MissReq): Bool = {
    data_not_refilled && (req.isLoad || req.isStore) && new_req.isLoad
  }

  def should_merge(new_req: MissReq): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    block_match &&
    (before_read_sent_can_merge(new_req) ||
      before_data_refill_can_merge(new_req))
  }

  def should_reject(new_req: MissReq): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val set_match = set === addr_to_dcache_set(new_req.vaddr)

    req_valid &&
      Mux(
        block_match,
        !before_read_sent_can_merge(new_req) &&
          !before_data_refill_can_merge(new_req),
        set_match && new_req.way_en === req.way_en
      )
  }

  io.primary_ready := !req_valid
  io.secondary_ready := should_merge(io.req.bits)
  io.secondary_reject := should_reject(io.req.bits)

  // should not allocate, merge or reject at the same time
  assert(RegNext(PopCount(Seq(io.primary_ready, io.secondary_ready, io.secondary_reject)) <= 1.U))

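  // Forward refill data to the load queue beat by beat, one cycle after each
  // grant beat arrives (hence the RegNext on valid/addr and the registered
  // beat index into the split refill data).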
  val refill_data_split = WireInit(VecInit(Seq.tabulate(cfg.blockBytes * 8 / l1BusDataWidth)(i => {
    val data = refill_data.asUInt
    data((i + 1) * l1BusDataWidth - 1, i * l1BusDataWidth)
  })))
  io.refill_to_ldq.valid := RegNext(!w_grantlast && io.mem_grant.fire()) && should_refill_data_reg
  io.refill_to_ldq.bits.addr := RegNext(req.addr + (refill_count << refillOffBits))
  io.refill_to_ldq.bits.data := refill_data_split(RegNext(refill_count))
  io.refill_to_ldq.bits.refill_done := RegNext(refill_done && io.mem_grant.fire())
  io.refill_to_ldq.bits.hasdata := hasData
  io.refill_to_ldq.bits.data_raw := refill_data_raw.asUInt

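  // If a store overwrites the whole block we only need permissions, not data,
  // so AcquirePerm is sent instead of AcquireBlock; L2 then answers with a
  // data-less Grant, handled by the full_overwrite branch in the grant logic.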
  io.mem_acquire.valid := !s_acquire
  val grow_param = req.req_coh.onAccess(req.cmd)._2
  val acquireBlock = edge.AcquireBlock(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  val acquirePerm = edge.AcquirePerm(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  io.mem_acquire.bits := Mux(full_overwrite, acquirePerm, acquireBlock)
  // let L2 resolve the cache alias
  io.mem_acquire.bits.user.lift(AliasKey).foreach(_ := req.vaddr(13, 12))
  // trigger prefetch
  io.mem_acquire.bits.user.lift(PrefetchKey).foreach(_ := true.B)
  // prefer not to cache data in L2 by default
  io.mem_acquire.bits.user.lift(PreferCacheKey).foreach(_ := false.B)
  require(nSets <= 256)

  io.mem_grant.ready := !w_grantlast && s_acquire

  val grantack = RegEnable(edge.GrantAck(io.mem_grant.bits), io.mem_grant.fire())
  assert(RegNext(!io.mem_grant.fire() || edge.isRequest(io.mem_grant.bits)))
  io.mem_finish.valid := !s_grantack && w_grantfirst
  io.mem_finish.bits := grantack

  io.replace_pipe_req.valid := !s_replace_req
  val replace = io.replace_pipe_req.bits
  replace := DontCare
  replace.miss := false.B
  replace.miss_id := io.id
  replace.miss_dirty := false.B
  replace.probe := false.B
  replace.probe_need_data := false.B
  replace.source := LOAD_SOURCE.U
  replace.vaddr := req.vaddr // only untag bits are needed
  replace.addr := Cat(req.replace_tag, 0.U(pgUntagBits.W)) // only tag bits are needed
  replace.store_mask := 0.U
  replace.replace := true.B
  replace.replace_way_en := req.way_en

  io.refill_pipe_req.valid := !s_refill && w_replace_resp && w_grantlast
  val refill = io.refill_pipe_req.bits
  refill.source := req.source
  refill.addr := req.addr
  refill.way_en := req.way_en
  refill.wmask := Mux(
    hasData || req.isLoad,
    ~0.U(DCacheBanks.W),
    VecInit((0 until DCacheBanks).map(i => get_mask_of_bank(i, req.store_mask).orR)).asUInt
  )
  refill.data := refill_data.asTypeOf((new RefillPipeReq).data)
  refill.miss_id := io.id
  refill.id := req.id
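  // Compute the coherence state after the miss completes, from the memory op
  // category (rd = read, wi = write-intent, wr = write), the permission
  // granted by L2 (toB / toT), and whether L2 reported the block dirty.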
  def missCohGen(cmd: UInt, param: UInt, dirty: Bool) = {
    val c = categorize(cmd)
    MuxLookup(Cat(c, param, dirty), Nothing, Seq(
      //(effect param) -> (next)
      Cat(rd, toB, false.B)  -> Branch,
      Cat(rd, toB, true.B)   -> Branch,
      Cat(rd, toT, false.B)  -> Trunk,
      Cat(rd, toT, true.B)   -> Dirty,
      Cat(wi, toT, false.B)  -> Trunk,
      Cat(wi, toT, true.B)   -> Dirty,
      Cat(wr, toT, false.B)  -> Dirty,
      Cat(wr, toT, true.B)   -> Dirty))
  }
  refill.meta.coh := ClientMetadata(missCohGen(req.cmd, grant_param, isDirty))
  refill.alias := req.vaddr(13, 12) // TODO

  io.main_pipe_req.valid := !s_mainpipe_req && w_grantlast
  io.main_pipe_req.bits := DontCare
  io.main_pipe_req.bits.miss := true.B
  io.main_pipe_req.bits.miss_id := io.id
  io.main_pipe_req.bits.miss_param := grant_param
  io.main_pipe_req.bits.miss_dirty := isDirty
  io.main_pipe_req.bits.probe := false.B
  io.main_pipe_req.bits.source := req.source
  io.main_pipe_req.bits.cmd := req.cmd
  io.main_pipe_req.bits.vaddr := req.vaddr
  io.main_pipe_req.bits.addr := req.addr
  io.main_pipe_req.bits.store_data := refill_data.asUInt
  io.main_pipe_req.bits.store_mask := ~0.U(blockBytes.W)
  io.main_pipe_req.bits.word_idx := req.word_idx
  io.main_pipe_req.bits.amo_data := req.amo_data
  io.main_pipe_req.bits.amo_mask := req.amo_mask
  io.main_pipe_req.bits.id := req.id

  io.block_addr.valid := req_valid && w_grantlast && !w_refill_resp
  io.block_addr.bits := req.addr

  io.debug_early_replace.valid := BoolStopWatch(io.replace_pipe_resp, io.refill_pipe_req.fire())
  io.debug_early_replace.bits.idx := addr_to_dcache_set(req.vaddr)
  io.debug_early_replace.bits.tag := req.replace_tag

  XSPerfAccumulate("miss_req_primary", primary_fire)
  XSPerfAccumulate("miss_req_merged", secondary_fire)
  XSPerfAccumulate("load_miss_penalty_to_use",
    should_refill_data &&
      BoolStopWatch(primary_fire, io.refill_to_ldq.valid, true)
  )
  XSPerfAccumulate("main_pipe_penalty", BoolStopWatch(io.main_pipe_req.fire(), io.main_pipe_resp))
  XSPerfAccumulate("penalty_blocked_by_channel_A", io.mem_acquire.valid && !io.mem_acquire.ready)
  XSPerfAccumulate("penalty_waiting_for_channel_D", s_acquire && !w_grantlast && !io.mem_grant.valid)
  XSPerfAccumulate("penalty_waiting_for_channel_E", io.mem_finish.valid && !io.mem_finish.ready)
  XSPerfAccumulate("penalty_from_grant_to_refill", !w_refill_resp && w_grantlast)
  XSPerfAccumulate("soft_prefetch_number", primary_fire && io.req.bits.source === SOFT_PREFETCH.U)

  val (mshr_penalty_sample, mshr_penalty) = TransactionLatencyCounter(RegNext(primary_fire), release_entry)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 20, 100, 10, true, false)

  val load_miss_begin = primary_fire && io.req.bits.isLoad
  val refill_finished = RegNext(!w_grantlast && refill_done) && should_refill_data
  val (load_miss_penalty_sample, load_miss_penalty) = TransactionLatencyCounter(load_miss_begin, refill_finished) // not the real refill finish time
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 20, 100, 10, true, false)

  val (a_to_d_penalty_sample, a_to_d_penalty) = TransactionLatencyCounter(io.mem_acquire.fire(), io.mem_grant.fire() && refill_done)
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 20, 100, 10, true, false)
}

class MissQueue(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule with HasPerfEvents {
  val io = IO(new Bundle {
    val hartId = Input(UInt(8.W))
    val req = Flipped(DecoupledIO(new MissReq))
    val refill_to_ldq = ValidIO(new Refill)

    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    val refill_pipe_req = DecoupledIO(new RefillPipeReq)
    val refill_pipe_resp = Flipped(ValidIO(UInt(log2Up(cfg.nMissEntries).W)))

    val replace_pipe_req = DecoupledIO(new MainPipeReq)
    val replace_pipe_resp = Flipped(ValidIO(UInt(log2Up(cfg.nMissEntries).W)))

    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Flipped(ValidIO(new AtomicsResp))

    // block probe
    val probe_addr = Input(UInt(PAddrBits.W))
    val probe_block = Output(Bool())

    val full = Output(Bool())

    // only for performance counters
    // This is valid when an mshr has finished replacing a block (w_replace_resp),
    // but has not yet received the Grant from L2 (!w_grantlast)
    val debug_early_replace = Vec(cfg.nMissEntries, ValidIO(new Bundle() {
      // info about the block that has been replaced
      val idx = UInt(idxBits.W) // vaddr
      val tag = UInt(tagBits.W) // paddr
    }))
  })

  // 128KBL1: FIXME: provide vaddr for l2

  val entries = Seq.fill(cfg.nMissEntries)(Module(new MissEntry(edge)))

  val primary_ready_vec = entries.map(_.io.primary_ready)
  val secondary_ready_vec = entries.map(_.io.secondary_ready)
  val secondary_reject_vec = entries.map(_.io.secondary_reject)
  val probe_block_vec = entries.map(e => e.io.block_addr.valid && e.io.block_addr.bits === io.probe_addr)

  val merge = Cat(secondary_ready_vec).orR
  val reject = Cat(secondary_reject_vec).orR
  val alloc = !reject && !merge && Cat(primary_ready_vec).orR
  val accept = alloc || merge

  assert(RegNext(PopCount(secondary_ready_vec) <= 1.U))
//  assert(RegNext(PopCount(secondary_reject_vec) <= 1.U))
  // It is possible that one mshr wants to merge a req while another mshr wants to reject it.
  // That is, an incoming req may have the same paddr as mshr_0 (merge),
  // but the same set and the same way as mshr_1 (reject).
  // In this situation, the incoming req should be merged by mshr_0.
//  assert(RegNext(PopCount(Seq(merge, reject)) <= 1.U))

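  // One-hot select helper: forwards the single valid input to out and fans the
  // ready signal back to every input; the assert checks the one-hot assumption.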
  def select_valid_one[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {

    if (name.nonEmpty) { out.suggestName(s"${name.get}_select") }
    out.valid := Cat(in.map(_.valid)).orR
    out.bits := ParallelMux(in.map(_.valid) zip in.map(_.bits))
    in.foreach(_.ready := out.ready)
    assert(!RegNext(out.valid && PopCount(Cat(in.map(_.valid))) > 1.U))
  }

  io.mem_grant.ready := false.B

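  // Per-entry wiring. primary_valid implements lowest-index-first allocation:
  // an entry may only accept a new request if no lower-numbered entry is free,
  // so at most one entry allocates per cycle.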
  entries.zipWithIndex.foreach {
    case (e, i) =>
      val former_primary_ready = if (i == 0)
        false.B
      else
        Cat((0 until i).map(j => entries(j).io.primary_ready)).orR

      e.io.id := i.U
      e.io.req.valid := io.req.valid
      e.io.primary_valid := io.req.valid &&
        !merge &&
        !reject &&
        !former_primary_ready &&
        e.io.primary_ready
      e.io.req.bits := io.req.bits

      e.io.mem_grant.valid := false.B
      e.io.mem_grant.bits := DontCare
      when (io.mem_grant.bits.source === i.U) {
        e.io.mem_grant <> io.mem_grant
      }

      e.io.refill_pipe_resp := io.refill_pipe_resp.valid && io.refill_pipe_resp.bits === i.U
      e.io.replace_pipe_resp := io.replace_pipe_resp.valid && io.replace_pipe_resp.bits === i.U
      e.io.main_pipe_resp := io.main_pipe_resp.valid && io.main_pipe_resp.bits.ack_miss_queue && io.main_pipe_resp.bits.miss_id === i.U

      io.debug_early_replace(i) := e.io.debug_early_replace
  }

  io.req.ready := accept
  io.refill_to_ldq.valid := Cat(entries.map(_.io.refill_to_ldq.valid)).orR
  io.refill_to_ldq.bits := ParallelMux(entries.map(_.io.refill_to_ldq.valid) zip entries.map(_.io.refill_to_ldq.bits))

  TLArbiter.lowest(edge, io.mem_acquire, entries.map(_.io.mem_acquire):_*)
  TLArbiter.lowest(edge, io.mem_finish, entries.map(_.io.mem_finish):_*)

  arbiter_with_pipereg(entries.map(_.io.refill_pipe_req), io.refill_pipe_req, Some("refill_pipe_req"))
  arbiter(entries.map(_.io.replace_pipe_req), io.replace_pipe_req, Some("replace_pipe_req"))
  arbiter(entries.map(_.io.main_pipe_req), io.main_pipe_req, Some("main_pipe_req"))

  io.probe_block := Cat(probe_block_vec).orR

  if (env.EnableDifftest) {
    val difftest = Module(new DifftestRefillEvent)
    difftest.io.clock := clock
    difftest.io.coreid := io.hartId
    difftest.io.valid := io.refill_to_ldq.valid && io.refill_to_ldq.bits.hasdata && io.refill_to_ldq.bits.refill_done
    difftest.io.addr := io.refill_to_ldq.bits.addr
    difftest.io.data := io.refill_to_ldq.bits.data_raw.asTypeOf(difftest.io.data)
  }

  XSPerfAccumulate("miss_req", io.req.fire())
  XSPerfAccumulate("miss_req_allocate", io.req.fire() && alloc)
  XSPerfAccumulate("miss_req_merge_load", io.req.fire() && merge && io.req.bits.isLoad)
  XSPerfAccumulate("miss_req_reject_load", io.req.valid && reject && io.req.bits.isLoad)
  XSPerfAccumulate("probe_blocked_by_miss", io.probe_block)
  val max_inflight = RegInit(0.U((log2Up(cfg.nMissEntries) + 1).W))
  val num_valids = PopCount(~Cat(primary_ready_vec).asUInt)
  when (num_valids > max_inflight) {
    max_inflight := num_valids
  }
  // average max inflight = max_inflight_total / cycle count
  XSPerfAccumulate("max_inflight", max_inflight)
  QueuePerf(cfg.nMissEntries, num_valids, num_valids === cfg.nMissEntries.U)
  io.full := num_valids === cfg.nMissEntries.U
  XSPerfHistogram("num_valids", num_valids, true.B, 0, cfg.nMissEntries, 1)

  val perfEvents = Seq(
    ("dcache_missq_req      ", io.req.fire()),
    ("dcache_missq_1_4_valid", num_valids < (cfg.nMissEntries.U / 4.U)),
    ("dcache_missq_2_4_valid", num_valids > (cfg.nMissEntries.U / 4.U) && num_valids <= (cfg.nMissEntries.U / 2.U)),
    ("dcache_missq_3_4_valid", num_valids > (cfg.nMissEntries.U / 2.U) && num_valids <= (cfg.nMissEntries.U * 3.U / 4.U)),
    ("dcache_missq_4_4_valid", num_valids > (cfg.nMissEntries.U * 3.U / 4.U)),
  )
  generatePerfEvent()
}