xref: /XiangShan/src/main/scala/xiangshan/cache/dcache/mainpipe/MissQueue.scala (revision 1cee9cb85eece1a7a6880f1e9945a35c62cb4b3a)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.tilelink.ClientStates._
import freechips.rocketchip.tilelink.MemoryOpCategories._
import freechips.rocketchip.tilelink.TLPermissions._
import difftest._
import huancun.{AliasKey, DirtyKey, PreferCacheKey, PrefetchKey}
import huancun.utils.FastArbiter

class MissReq(implicit p: Parameters) extends DCacheBundle {
  val source = UInt(sourceTypeWidth.W)
  val cmd = UInt(M_SZ.W)
  val addr = UInt(PAddrBits.W)
  val vaddr = UInt(VAddrBits.W)
  val way_en = UInt(DCacheWays.W)

  // store
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)

  // which word does amo work on?
  val word_idx = UInt(log2Up(blockWords).W)
  val amo_data = UInt(DataBits.W)
  val amo_mask = UInt((DataBits / 8).W)

  val req_coh = new ClientMetadata
  val replace_coh = new ClientMetadata
  val replace_tag = UInt(tagBits.W)
  val id = UInt(reqIdWidth.W)

  // For now, a miss queue entry request is actually valid when req.valid && !cancel
  // * req.valid is fast to generate
  // * cancel is slow to generate and is not used until the last moment
  //
  // cancel may come from the following sources:
  // 1. miss req blocked by writeback queue:
  //      a writeback req to the same address is in progress
  // 2. pmp check failed
  val cancel = Bool() // cancel is slow to generate; it invalidates req.valid at the last moment

  def isLoad = source === LOAD_SOURCE.U
  def isStore = source === STORE_SOURCE.U
  def isAMO = source === AMO_SOURCE.U
  def hit = req_coh.isValid()
}

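// One MissEntry is a single MSHR. From allocation to release it:
//   * acquires the missing block (or just permissions) from L2 over TileLink A/D/E,
//   * asks the replace pipe to write back the victim when a valid victim must be evicted,
//   * writes the refilled block into the cache via the refill pipe (loads/stores),
//     or hands the whole miss to the main pipe (AMOs),
//   * and lets loads snoop refill beats early through refill_to_ldq.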
class MissEntry(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule {
  val io = IO(new Bundle() {
    // MSHR ID
    val id = Input(UInt(log2Up(cfg.nMissEntries).W))
    // client requests
    val req    = Flipped(ValidIO(new MissReq))
    // allocate this entry for new req
    val primary_valid = Input(Bool())
    // this entry is free and can be allocated to new reqs
    val primary_ready = Output(Bool())
    // this entry is busy, but it can merge the new req
    val secondary_ready = Output(Bool())
    // this entry is busy and it can not merge the new req
    val secondary_reject = Output(Bool())

    val refill_to_ldq = ValidIO(new Refill)

    // bus
    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    // refill pipe
    val refill_pipe_req = DecoupledIO(new RefillPipeReq)
    val refill_pipe_resp = Input(Bool())

    // replace pipe
    val replace_pipe_req = DecoupledIO(new MainPipeReq)
    val replace_pipe_resp = Input(Bool())

    // main pipe: amo miss
    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Input(Bool())

    val block_addr = ValidIO(UInt(PAddrBits.W))

    val debug_early_replace = ValidIO(new Bundle() {
      // info about the block that has been replaced
      val idx = UInt(idxBits.W) // vaddr
      val tag = UInt(tagBits.W) // paddr
    })
  })

  assert(!RegNext(io.primary_valid && !io.primary_ready))

  val req = Reg(new MissReq)
  val req_valid = RegInit(false.B)
  val set = addr_to_dcache_set(req.vaddr)

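  // Schedule / wait flags. An s_* register is false while the corresponding
  // request still has to be sent and is set back to true once it fires.
  // A w_* register is false while the corresponding response is still awaited.
  // All flags reset to true ("nothing to do"); primary_fire pulls the relevant
  // ones low when the entry is allocated.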
  val s_acquire = RegInit(true.B)
  val s_grantack = RegInit(true.B)
  val s_replace_req = RegInit(true.B)
  val s_refill = RegInit(true.B)
  val s_mainpipe_req = RegInit(true.B)

  val w_grantfirst = RegInit(true.B)
  val w_grantlast = RegInit(true.B)
  val w_replace_resp = RegInit(true.B)
  val w_refill_resp = RegInit(true.B)
  val w_mainpipe_resp = RegInit(true.B)

  val release_entry = s_grantack && w_refill_resp && w_mainpipe_resp

  val acquire_not_sent = !s_acquire && !io.mem_acquire.ready
  val data_not_refilled = !w_grantfirst

  val error = RegInit(false.B)

  val should_refill_data_reg = Reg(Bool())
  val should_refill_data = WireInit(should_refill_data_reg)

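  // A store that writes every byte of the cache block does not need the old data:
  // it can issue AcquirePerm instead of AcquireBlock (see io.mem_acquire.bits below),
  // and the refill data is then taken entirely from the store itself.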
  val full_overwrite = req.isStore && req.store_mask.andR

  val (_, _, refill_done, refill_count) = edge.count(io.mem_grant)
  val grant_param = Reg(UInt(TLPermissions.bdWidth.W))

  when (release_entry && req_valid) {
    req_valid := false.B
  }

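  // An entry fires in one of two ways:
  //   * primary_fire: the entry is free and is allocated for a new miss request;
  //   * secondary_fire: the entry already tracks the same block and merges the
  //     new request into it (see should_merge below).
  // Both ignore requests whose slow cancel signal is asserted.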
  val primary_fire = WireInit(io.req.valid && io.primary_ready && io.primary_valid && !io.req.bits.cancel)
  when (primary_fire) {
    req_valid := true.B
    req := io.req.bits
    req.addr := get_block_addr(io.req.bits.addr)

    s_acquire := false.B
    s_grantack := false.B

    w_grantfirst := false.B
    w_grantlast := false.B

    when (!io.req.bits.isAMO) {
      s_refill := false.B
      w_refill_resp := false.B
    }

    when (!io.req.bits.hit && io.req.bits.replace_coh.isValid() && !io.req.bits.isAMO) {
      s_replace_req := false.B
      w_replace_resp := false.B
    }

    when (io.req.bits.isAMO) {
      s_mainpipe_req := false.B
      w_mainpipe_resp := false.B
    }

    should_refill_data_reg := io.req.bits.isLoad
    error := false.B
  }

  val secondary_fire = WireInit(io.req.valid && io.secondary_ready && !io.req.bits.cancel)
  when (secondary_fire) {
    assert(io.req.bits.req_coh.state <= req.req_coh.state)
    assert(!(io.req.bits.isAMO || req.isAMO))
    // use the most up-to-date meta
    req.req_coh := io.req.bits.req_coh

    when (io.req.bits.isStore) {
      req := io.req.bits
      req.addr := get_block_addr(io.req.bits.addr)
      // keep the victim way and its metadata chosen at allocation time
      req.way_en := req.way_en
      req.replace_coh := req.replace_coh
      req.replace_tag := req.replace_tag
    }

    should_refill_data := should_refill_data_reg || io.req.bits.isLoad
    should_refill_data_reg := should_refill_data
  }

  when (io.mem_acquire.fire()) {
    s_acquire := true.B
  }

  val refill_data = Reg(Vec(blockRows, UInt(rowBits.W)))
  val refill_data_raw = Reg(Vec(blockBytes/beatBytes, UInt(beatBits.W)))
  val new_data = Wire(Vec(blockRows, UInt(rowBits.W)))
  val new_mask = Wire(Vec(blockRows, UInt(rowBytes.W)))
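  // mergePutData overlays the pending store bytes onto refill data. wmask is a
  // per-byte mask that is expanded to a per-bit mask; masked bytes come from
  // new_data, the rest from old_data. For example (8-bit bytes):
  //   mergePutData(old_data = 0xAABB, new_data = 0xCCDD, wmask = "b01".U) = 0xAADD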
  def mergePutData(old_data: UInt, new_data: UInt, wmask: UInt): UInt = {
    val full_wmask = FillInterleaved(8, wmask)
    (~full_wmask & old_data | full_wmask & new_data)
  }
  for (i <- 0 until blockRows) {
    new_data(i) := req.store_data(rowBits * (i + 1) - 1, rowBits * i)
    // we only need to merge data for Store
    new_mask(i) := Mux(req.isStore, req.store_mask(rowBytes * (i + 1) - 1, rowBytes * i), 0.U)
  }
  val hasData = RegInit(true.B)
  val isDirty = RegInit(false.B)
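  // Handle Grant / GrantData beats from L2:
  //   * GrantData carries the block; each beat is merged with the pending store
  //     bytes before being written into refill_data.
  //   * A bare Grant carries permissions only, which is legal only when the store
  //     overwrites the whole block (full_overwrite), so refill_data is simply the
  //     store data.
  // grant_param and the Dirty echo are recorded to compute the refilled
  // coherence state later.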
  when (io.mem_grant.fire()) {
    w_grantfirst := true.B
    grant_param := io.mem_grant.bits.param
    when (edge.hasData(io.mem_grant.bits)) {
      // GrantData
      for (i <- 0 until beatRows) {
        val idx = (refill_count << log2Floor(beatRows)) + i.U
        val grant_row = io.mem_grant.bits.data(rowBits * (i + 1) - 1, rowBits * i)
        refill_data(idx) := mergePutData(grant_row, new_data(idx), new_mask(idx))
      }
      w_grantlast := w_grantlast || refill_done
      hasData := true.B
    }.otherwise {
      // Grant
      assert(full_overwrite)
      for (i <- 0 until blockRows) {
        refill_data(i) := new_data(i)
      }
      w_grantlast := true.B
      hasData := false.B
    }

    error := io.mem_grant.bits.denied || io.mem_grant.bits.corrupt || error

    refill_data_raw(refill_count) := io.mem_grant.bits.data
    isDirty := io.mem_grant.bits.echo.lift(DirtyKey).getOrElse(false.B)
  }

  when (io.mem_finish.fire()) {
    s_grantack := true.B
  }

  when (io.replace_pipe_req.fire()) {
    s_replace_req := true.B
  }

  when (io.replace_pipe_resp) {
    w_replace_resp := true.B
  }

  when (io.refill_pipe_req.fire()) {
    s_refill := true.B
  }

  when (io.refill_pipe_resp) {
    w_refill_resp := true.B
  }

  when (io.main_pipe_req.fire()) {
    s_mainpipe_req := true.B
  }

  when (io.main_pipe_resp) {
    w_mainpipe_resp := true.B
  }

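  // Merge / reject policy for a new request to the same block:
  //   * before the Acquire has been accepted by L2, a load entry may absorb
  //     another load or a store to the same block;
  //   * before the first Grant beat has arrived, a load or store entry may still
  //     absorb another load;
  //   * otherwise the request is rejected, and it is also rejected when it targets
  //     a different block that maps to the same set and the same victim way.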
  def before_read_sent_can_merge(new_req: MissReq): Bool = {
    acquire_not_sent && req.isLoad && (new_req.isLoad || new_req.isStore)
  }

  def before_data_refill_can_merge(new_req: MissReq): Bool = {
    data_not_refilled && (req.isLoad || req.isStore) && new_req.isLoad
  }

  def should_merge(new_req: MissReq): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    block_match &&
    (before_read_sent_can_merge(new_req) ||
      before_data_refill_can_merge(new_req))
  }

  def should_reject(new_req: MissReq): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val set_match = set === addr_to_dcache_set(new_req.vaddr)

    req_valid &&
      Mux(
        block_match,
        !before_read_sent_can_merge(new_req) &&
          !before_data_refill_can_merge(new_req),
        set_match && new_req.way_en === req.way_en
      )
  }

  io.primary_ready := !req_valid
  io.secondary_ready := should_merge(io.req.bits)
  io.secondary_reject := should_reject(io.req.bits)

  // should not allocate, merge or reject at the same time
  assert(RegNext(PopCount(Seq(io.primary_ready, io.secondary_ready, io.secondary_reject)) <= 1.U))

  val refill_data_splited = WireInit(VecInit(Seq.tabulate(cfg.blockBytes * 8 / l1BusDataWidth)(i => {
    val data = refill_data.asUInt
    data((i + 1) * l1BusDataWidth - 1, i * l1BusDataWidth)
  })))
  io.refill_to_ldq.valid := RegNext(!w_grantlast && io.mem_grant.fire()) && should_refill_data_reg
  io.refill_to_ldq.bits.addr := RegNext(req.addr + (refill_count << refillOffBits))
  io.refill_to_ldq.bits.data := refill_data_splited(RegNext(refill_count))
  io.refill_to_ldq.bits.error := RegNext(io.mem_grant.bits.corrupt || io.mem_grant.bits.denied)
  io.refill_to_ldq.bits.refill_done := RegNext(refill_done && io.mem_grant.fire())
  io.refill_to_ldq.bits.hasdata := hasData
  io.refill_to_ldq.bits.data_raw := refill_data_raw.asUInt

  io.mem_acquire.valid := !s_acquire
  val grow_param = req.req_coh.onAccess(req.cmd)._2
  val acquireBlock = edge.AcquireBlock(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  val acquirePerm = edge.AcquirePerm(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  io.mem_acquire.bits := Mux(full_overwrite, acquirePerm, acquireBlock)
  // let L2 resolve the cache alias
  io.mem_acquire.bits.user.lift(AliasKey).foreach( _ := req.vaddr(13, 12))
  // trigger prefetch
  io.mem_acquire.bits.user.lift(PrefetchKey).foreach(_ := true.B)
  // prefer not to cache data in L2 by default
  io.mem_acquire.bits.user.lift(PreferCacheKey).foreach(_ := false.B)
  require(nSets <= 256)
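  // Note: with 64-byte blocks and at most 256 sets, set index + block offset fit in
  // 14 bits, so vaddr(13, 12) (the bits above the 4 KB page offset) are presumably
  // all L2 needs to disambiguate virtual aliases of the same physical block.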

  io.mem_grant.ready := !w_grantlast && s_acquire

  val grantack = RegEnable(edge.GrantAck(io.mem_grant.bits), io.mem_grant.fire())
  assert(RegNext(!io.mem_grant.fire() || edge.isRequest(io.mem_grant.bits)))
  io.mem_finish.valid := !s_grantack && w_grantfirst
  io.mem_finish.bits := grantack

  io.replace_pipe_req.valid := !s_replace_req
  val replace = io.replace_pipe_req.bits
  replace := DontCare
  replace.miss := false.B
  replace.miss_id := io.id
  replace.miss_dirty := false.B
  replace.probe := false.B
  replace.probe_need_data := false.B
  replace.source := LOAD_SOURCE.U
  replace.vaddr := req.vaddr // only untag bits are needed
  replace.addr := Cat(req.replace_tag, 0.U(pgUntagBits.W)) // only tag bits are needed
  replace.store_mask := 0.U
  replace.replace := true.B
  replace.replace_way_en := req.way_en
  replace.error := false.B

  io.refill_pipe_req.valid := !s_refill && w_replace_resp && w_grantlast
  val refill = io.refill_pipe_req.bits
  refill.source := req.source
  refill.addr := req.addr
  refill.way_en := req.way_en
  refill.wmask := Mux(
    hasData || req.isLoad,
    ~0.U(DCacheBanks.W),
    VecInit((0 until DCacheBanks).map(i => get_mask_of_bank(i, req.store_mask).orR)).asUInt
  )
  refill.data := refill_data.asTypeOf((new RefillPipeReq).data)
  refill.miss_id := io.id
  refill.id := req.id
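  // missCohGen derives the refilled block's coherence state from the original
  // request command, the permission granted by L2 (grant_param) and the Dirty echo:
  // reads granted toB become Branch; reads and intents granted toT become Trunk,
  // or Dirty if the block came back dirty; writes granted toT become Dirty.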
  def missCohGen(cmd: UInt, param: UInt, dirty: Bool) = {
    val c = categorize(cmd)
    MuxLookup(Cat(c, param, dirty), Nothing, Seq(
      //(effect param) -> (next)
      Cat(rd, toB, false.B)  -> Branch,
      Cat(rd, toB, true.B)   -> Branch,
      Cat(rd, toT, false.B)  -> Trunk,
      Cat(rd, toT, true.B)   -> Dirty,
      Cat(wi, toT, false.B)  -> Trunk,
      Cat(wi, toT, true.B)   -> Dirty,
      Cat(wr, toT, false.B)  -> Dirty,
      Cat(wr, toT, true.B)   -> Dirty))
  }
  refill.meta.coh := ClientMetadata(missCohGen(req.cmd, grant_param, isDirty))
  refill.error := error
  refill.alias := req.vaddr(13, 12) // TODO

  io.main_pipe_req.valid := !s_mainpipe_req && w_grantlast
  io.main_pipe_req.bits := DontCare
  io.main_pipe_req.bits.miss := true.B
  io.main_pipe_req.bits.miss_id := io.id
  io.main_pipe_req.bits.miss_param := grant_param
  io.main_pipe_req.bits.miss_dirty := isDirty
  io.main_pipe_req.bits.miss_way_en := req.way_en
  io.main_pipe_req.bits.probe := false.B
  io.main_pipe_req.bits.source := req.source
  io.main_pipe_req.bits.cmd := req.cmd
  io.main_pipe_req.bits.vaddr := req.vaddr
  io.main_pipe_req.bits.addr := req.addr
  io.main_pipe_req.bits.store_data := refill_data.asUInt
  io.main_pipe_req.bits.store_mask := ~0.U(blockBytes.W)
  io.main_pipe_req.bits.word_idx := req.word_idx
  io.main_pipe_req.bits.amo_data := req.amo_data
  io.main_pipe_req.bits.amo_mask := req.amo_mask
  io.main_pipe_req.bits.error := error
  io.main_pipe_req.bits.id := req.id

  io.block_addr.valid := req_valid && w_grantlast && !w_refill_resp
  io.block_addr.bits := req.addr

  io.debug_early_replace.valid := BoolStopWatch(io.replace_pipe_resp, io.refill_pipe_req.fire())
  io.debug_early_replace.bits.idx := addr_to_dcache_set(req.vaddr)
  io.debug_early_replace.bits.tag := req.replace_tag

  XSPerfAccumulate("miss_req_primary", primary_fire)
  XSPerfAccumulate("miss_req_merged", secondary_fire)
  XSPerfAccumulate("load_miss_penalty_to_use",
    should_refill_data &&
      BoolStopWatch(primary_fire, io.refill_to_ldq.valid, true)
  )
  XSPerfAccumulate("main_pipe_penalty", BoolStopWatch(io.main_pipe_req.fire(), io.main_pipe_resp))
  XSPerfAccumulate("penalty_blocked_by_channel_A", io.mem_acquire.valid && !io.mem_acquire.ready)
  XSPerfAccumulate("penalty_waiting_for_channel_D", s_acquire && !w_grantlast && !io.mem_grant.valid)
  XSPerfAccumulate("penalty_waiting_for_channel_E", io.mem_finish.valid && !io.mem_finish.ready)
  XSPerfAccumulate("penalty_from_grant_to_refill", !w_refill_resp && w_grantlast)
  XSPerfAccumulate("soft_prefetch_number", primary_fire && io.req.bits.source === SOFT_PREFETCH.U)

  val (mshr_penalty_sample, mshr_penalty) = TransactionLatencyCounter(RegNext(primary_fire), release_entry)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 20, 100, 10, true, false)

  val load_miss_begin = primary_fire && io.req.bits.isLoad
  val refill_finished = RegNext(!w_grantlast && refill_done) && should_refill_data
  val (load_miss_penalty_sample, load_miss_penalty) = TransactionLatencyCounter(load_miss_begin, refill_finished) // not real refill finish time
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 20, 100, 10, true, false)

  val (a_to_d_penalty_sample, a_to_d_penalty) = TransactionLatencyCounter(io.mem_acquire.fire(), io.mem_grant.fire() && refill_done)
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 20, 100, 10, true, false)
}

class MissQueue(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule with HasPerfEvents {
  val io = IO(new Bundle {
    val hartId = Input(UInt(8.W))
    val req = Flipped(DecoupledIO(new MissReq))
    val refill_to_ldq = ValidIO(new Refill)

    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    val refill_pipe_req = DecoupledIO(new RefillPipeReq)
    val refill_pipe_resp = Flipped(ValidIO(UInt(log2Up(cfg.nMissEntries).W)))

    val replace_pipe_req = DecoupledIO(new MainPipeReq)
    val replace_pipe_resp = Flipped(ValidIO(UInt(log2Up(cfg.nMissEntries).W)))

    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Flipped(ValidIO(new AtomicsResp))

    // block probe
    val probe_addr = Input(UInt(PAddrBits.W))
    val probe_block = Output(Bool())

    val full = Output(Bool())

    // only for performance counter
    // This is valid when an mshr has finished replacing a block (w_replace_resp),
    // but hasn't received Grant from L2 (!w_grantlast)
    val debug_early_replace = Vec(cfg.nMissEntries, ValidIO(new Bundle() {
      // info about the block that has been replaced
      val idx = UInt(idxBits.W) // vaddr
      val tag = UInt(tagBits.W) // paddr
    }))
  })

  // 128KBL1: FIXME: provide vaddr for l2

  val entries = Seq.fill(cfg.nMissEntries)(Module(new MissEntry(edge)))

  val primary_ready_vec = entries.map(_.io.primary_ready)
  val secondary_ready_vec = entries.map(_.io.secondary_ready)
  val secondary_reject_vec = entries.map(_.io.secondary_reject)
  val probe_block_vec = entries.map { case e => e.io.block_addr.valid && e.io.block_addr.bits === io.probe_addr }

  val merge = Cat(secondary_ready_vec).orR
  val reject = Cat(secondary_reject_vec).orR
  val alloc = !reject && !merge && Cat(primary_ready_vec).orR
  val accept = alloc || merge

  assert(RegNext(PopCount(secondary_ready_vec) <= 1.U))
//  assert(RegNext(PopCount(secondary_reject_vec) <= 1.U))
  // It is possible that one mshr wants to merge a req, while another mshr wants to reject it.
  // That is, an incoming req may have the same paddr as mshr_0 (merge),
  // while it has the same set and the same way as mshr_1 (reject).
  // In this situation, the incoming req should be merged by mshr_0.
//  assert(RegNext(PopCount(Seq(merge, reject)) <= 1.U))

  def select_valid_one[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {

    if (name.nonEmpty) { out.suggestName(s"${name.get}_select") }
    out.valid := Cat(in.map(_.valid)).orR
    out.bits := ParallelMux(in.map(_.valid) zip in.map(_.bits))
    in.map(_.ready := out.ready)
    assert(!RegNext(out.valid && PopCount(Cat(in.map(_.valid))) > 1.U))
  }

  io.mem_grant.ready := false.B

  entries.zipWithIndex.foreach {
    case (e, i) =>
      val former_primary_ready = if(i == 0)
        false.B
      else
        Cat((0 until i).map(j => entries(j).io.primary_ready)).orR
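      // A request is allocated to the lowest-indexed free entry: this entry asserts
      // primary_valid only if no lower-indexed entry is also free (former_primary_ready)
      // and the request is neither merged nor rejected by any entry.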

      e.io.id := i.U
      e.io.req.valid := io.req.valid
      e.io.primary_valid := io.req.valid &&
        !merge &&
        !reject &&
        !former_primary_ready &&
        e.io.primary_ready
      e.io.req.bits := io.req.bits

      e.io.mem_grant.valid := false.B
      e.io.mem_grant.bits := DontCare
      when (io.mem_grant.bits.source === i.U) {
        e.io.mem_grant <> io.mem_grant
      }

      e.io.refill_pipe_resp := io.refill_pipe_resp.valid && io.refill_pipe_resp.bits === i.U
      e.io.replace_pipe_resp := io.replace_pipe_resp.valid && io.replace_pipe_resp.bits === i.U
      e.io.main_pipe_resp := io.main_pipe_resp.valid && io.main_pipe_resp.bits.ack_miss_queue && io.main_pipe_resp.bits.miss_id === i.U

      io.debug_early_replace(i) := e.io.debug_early_replace
  }

  io.req.ready := accept
  io.refill_to_ldq.valid := Cat(entries.map(_.io.refill_to_ldq.valid)).orR
  io.refill_to_ldq.bits := ParallelMux(entries.map(_.io.refill_to_ldq.valid) zip entries.map(_.io.refill_to_ldq.bits))

  TLArbiter.lowest(edge, io.mem_acquire, entries.map(_.io.mem_acquire):_*)
  TLArbiter.lowest(edge, io.mem_finish, entries.map(_.io.mem_finish):_*)

  arbiter_with_pipereg(entries.map(_.io.refill_pipe_req), io.refill_pipe_req, Some("refill_pipe_req"))
  // arbiter(entries.map(_.io.replace_pipe_req), io.replace_pipe_req, Some("replace_pipe_req"))
  val replace_pipe_req_arb = Module(new FastArbiter(new MainPipeReq, cfg.nMissEntries))
  replace_pipe_req_arb.io.in.zip(entries.map(_.io.replace_pipe_req)).foreach(a => a._1 <> a._2)
  io.replace_pipe_req <> replace_pipe_req_arb.io.out
  arbiter(entries.map(_.io.main_pipe_req), io.main_pipe_req, Some("main_pipe_req"))

  io.probe_block := Cat(probe_block_vec).orR

  io.full := ~Cat(entries.map(_.io.primary_ready)).andR
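  // Note: io.full is driven again below from num_valids; by Chisel's last-connect
  // semantics that later assignment is the one that takes effect.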

  if (env.EnableDifftest) {
    val difftest = Module(new DifftestRefillEvent)
    difftest.io.clock := clock
    difftest.io.coreid := io.hartId
    difftest.io.cacheid := 1.U
    difftest.io.valid := io.refill_to_ldq.valid && io.refill_to_ldq.bits.hasdata && io.refill_to_ldq.bits.refill_done
    difftest.io.addr := io.refill_to_ldq.bits.addr
    difftest.io.data := io.refill_to_ldq.bits.data_raw.asTypeOf(difftest.io.data)
  }

  XSPerfAccumulate("miss_req", io.req.fire())
  XSPerfAccumulate("miss_req_allocate", io.req.fire() && alloc)
  XSPerfAccumulate("miss_req_merge_load", io.req.fire() && merge && io.req.bits.isLoad)
  XSPerfAccumulate("miss_req_reject_load", io.req.valid && reject && io.req.bits.isLoad)
  XSPerfAccumulate("probe_blocked_by_miss", io.probe_block)
  val max_inflight = RegInit(0.U((log2Up(cfg.nMissEntries) + 1).W))
  val num_valids = PopCount(~Cat(primary_ready_vec).asUInt)
  when (num_valids > max_inflight) {
    max_inflight := num_valids
  }
  // max inflight (average) = max_inflight_total / cycle cnt
  XSPerfAccumulate("max_inflight", max_inflight)
  QueuePerf(cfg.nMissEntries, num_valids, num_valids === cfg.nMissEntries.U)
  io.full := num_valids === cfg.nMissEntries.U
  XSPerfHistogram("num_valids", num_valids, true.B, 0, cfg.nMissEntries, 1)

  val perfEvents = Seq(
    ("dcache_missq_req      ", io.req.fire()),
    ("dcache_missq_1_4_valid", (PopCount(entries.map(entry => (!entry.io.primary_ready))) < (cfg.nMissEntries.U/4.U))),
    ("dcache_missq_2_4_valid", (PopCount(entries.map(entry => (!entry.io.primary_ready))) > (cfg.nMissEntries.U/4.U)) & (PopCount(entries.map(entry => (!entry.io.primary_ready))) <= (cfg.nMissEntries.U/2.U))),
    ("dcache_missq_3_4_valid", (PopCount(entries.map(entry => (!entry.io.primary_ready))) > (cfg.nMissEntries.U/2.U)) & (PopCount(entries.map(entry => (!entry.io.primary_ready))) <= (cfg.nMissEntries.U*3.U/4.U))),
    ("dcache_missq_4_4_valid", (PopCount(entries.map(entry => (!entry.io.primary_ready))) > (cfg.nMissEntries.U*3.U/4.U))),
  )
  generatePerfEvent()
}