xref: /XiangShan/src/main/scala/xiangshan/cache/dcache/mainpipe/MissQueue.scala (revision b03c55a5df5dc8793cb44b42dd60141566e57e78)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache

import chisel3._
import chisel3.util._
import coupledL2.VaddrKey
import coupledL2.IsKeywordKey
import difftest._
import freechips.rocketchip.tilelink.ClientStates._
import freechips.rocketchip.tilelink.MemoryOpCategories._
import freechips.rocketchip.tilelink.TLPermissions._
import freechips.rocketchip.tilelink._
import huancun.{AliasKey, DirtyKey, PrefetchKey}
import org.chipsalliance.cde.config.Parameters
import utility._
import utils._
import xiangshan._
import xiangshan.mem.AddPipelineReg
import xiangshan.mem.prefetch._
import xiangshan.mem.trace._
import xiangshan.mem.LqPtr

class MissReqWoStoreData(implicit p: Parameters) extends DCacheBundle {
  val source = UInt(sourceTypeWidth.W)
  val pf_source = UInt(L1PfSourceBits.W)
  val cmd = UInt(M_SZ.W)
  val addr = UInt(PAddrBits.W)
  val vaddr = UInt(VAddrBits.W)
  val pc = UInt(VAddrBits.W)

  val lqIdx = new LqPtr
  // store
  val full_overwrite = Bool()

  // which word does amo work on?
  val word_idx = UInt(log2Up(blockWords).W)
  val amo_data = UInt(DataBits.W)
  val amo_mask = UInt((DataBits / 8).W)

  val req_coh = new ClientMetadata
  val id = UInt(reqIdWidth.W)

  // For now, a miss queue entry req is actually valid when req.valid && !cancel
  // * req.valid is fast to generate
  // * cancel is slow to generate, it will not be used until the last moment
  //
  // cancel may come from the following sources:
  // 1. miss req blocked by writeback queue:
  //      a writeback req of the same address is in progress
  // 2. pmp check failed
  val cancel = Bool() // cancel is slow to generate, it will cancel missreq.valid

  // Req source decode
  // Note that req source is NOT cmd type
  // For instance, a req which isFromPrefetch may have R or W cmd
  def isFromLoad = source === LOAD_SOURCE.U
  def isFromStore = source === STORE_SOURCE.U
  def isFromAMO = source === AMO_SOURCE.U
  def isFromPrefetch = source >= DCACHE_PREFETCH_SOURCE.U
  def isPrefetchWrite = source === DCACHE_PREFETCH_SOURCE.U && cmd === MemoryOpConstants.M_PFW
  def isPrefetchRead = source === DCACHE_PREFETCH_SOURCE.U && cmd === MemoryOpConstants.M_PFR
  def hit = req_coh.isValid()
}

class MissReqStoreData(implicit p: Parameters) extends DCacheBundle {
  // store data and store mask will be written to miss queue entry
  // 1 cycle after req.fire() and meta write
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)
}

class MissQueueRefillInfo(implicit p: Parameters) extends MissReqStoreData {
  // refill_info for mainpipe req awake
  val miss_param = UInt(TLPermissions.bdWidth.W)
  val miss_dirty = Bool()
  val error      = Bool()
}

class MissReq(implicit p: Parameters) extends MissReqWoStoreData {
  // store data and store mask will be written to miss queue entry
  // 1 cycle after req.fire() and meta write
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)

  def toMissReqStoreData(): MissReqStoreData = {
    val out = Wire(new MissReqStoreData)
    out.store_data := store_data
    out.store_mask := store_mask
    out
  }

  def toMissReqWoStoreData(): MissReqWoStoreData = {
    val out = Wire(new MissReqWoStoreData)
    out.source := source
    out.pf_source := pf_source
    out.cmd := cmd
    out.addr := addr
    out.vaddr := vaddr
    out.full_overwrite := full_overwrite
    out.word_idx := word_idx
    out.amo_data := amo_data
    out.amo_mask := amo_mask
    out.req_coh := req_coh
    out.id := id
    out.cancel := cancel
    out.pc := pc
    out.lqIdx := lqIdx
    out
  }
}

class MissResp(implicit p: Parameters) extends DCacheBundle {
  val id = UInt(log2Up(cfg.nMissEntries).W)
  // cache miss request is handled by miss queue, either merged or newly allocated
  val handled = Bool()
  // cache req missed, merged into one of miss queue entries
  // i.e. !miss_merged means this access is the first miss for this cacheline
  val merged = Bool()
}


/**
  * miss queue enq logic: enq is now split into 2 cycles
  *  +---------------------------------------------------------------------+    pipeline reg  +-------------------------+
  *  +         s0: enq source arbiter, judge mshr alloc or merge           +     +-------+    + s1: real alloc or merge +
  *  +                      +-----+          primary_fire?       ->        +     | alloc |    +                         +
  *  + mainpipe  -> req0 -> |     |          secondary_fire?     ->        +     | merge |    +                         +
  *  + loadpipe0 -> req1 -> | arb | -> req                       ->        +  -> | req   | -> +                         +
  *  + loadpipe1 -> req2 -> |     |          mshr id             ->        +     | id    |    +                         +
  *  +                      +-----+                                        +     +-------+    +                         +
  *  +---------------------------------------------------------------------+                  +-------------------------+
  */

// a pipeline reg between MissReq and MissEntry
class MissReqPipeRegBundle(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheBundle
 with HasCircularQueuePtrHelper
 {
  val req           = new MissReq
  // this request is about to merge to an existing mshr
  val merge         = Bool()
  // this request is about to allocate a new mshr
  val alloc         = Bool()
  val mshr_id       = UInt(log2Up(cfg.nMissEntries).W)

  def reg_valid(): Bool = {
    (merge || alloc)
  }

  def matched(new_req: MissReq): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    block_match && reg_valid() && !(req.isFromPrefetch)
  }

  def prefetch_late_en(new_req: MissReqWoStoreData, new_req_valid: Bool): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    new_req_valid && alloc && block_match && (req.isFromPrefetch) && !(new_req.isFromPrefetch)
  }

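  // A req whose physical block matches an in-flight one but whose vaddr alias
  // bits differ must be rejected rather than merged: in a VIPT dcache, aliased
  // vaddrs index different sets, so the existing entry cannot serve it.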
  def reject_req(new_req: MissReq): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)
    val merge_load = (req.isFromLoad || req.isFromStore || req.isFromPrefetch) && new_req.isFromLoad
    // merging a store into a store is disabled; sbuffer should avoid this situation, as stores to the same address must preserve their program order to match the memory model
    val merge_store = (req.isFromLoad || req.isFromPrefetch) && new_req.isFromStore

    val set_match = addr_to_dcache_set(req.vaddr) === addr_to_dcache_set(new_req.vaddr)

    Mux(
        alloc,
        block_match && (!alias_match || !(merge_load || merge_store)),
        false.B
      )
  }

  def merge_req(new_req: MissReq): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)
    val merge_load = (req.isFromLoad || req.isFromStore || req.isFromPrefetch) && new_req.isFromLoad
    // merging a store into a store is disabled; sbuffer should avoid this situation, as stores to the same address must preserve their program order to match the memory model
    val merge_store = (req.isFromLoad || req.isFromPrefetch) && new_req.isFromStore
    Mux(
        alloc,
        block_match && alias_match && (merge_load || merge_store),
        false.B
      )
  }

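  // "isKeyword" selects critical-(half)word-first refill: vaddr(5) picks which
  // half of the cache block L2 should return first (with 64B blocks, bit 5
  // distinguishes the lower and upper 32B halves). When two loads merge, the
  // older load in program order (by lqIdx) decides which half comes first.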
  def merge_isKeyword(new_req: MissReq): Bool = {
    val load_merge_load  = merge_req(new_req) && req.isFromLoad  && new_req.isFromLoad
    val store_merge_load = merge_req(new_req) && req.isFromStore && new_req.isFromLoad
    val load_merge_load_use_new_req_isKeyword = isAfter(req.lqIdx, new_req.lqIdx)
    val use_new_req_isKeyword = (load_merge_load && load_merge_load_use_new_req_isKeyword) || store_merge_load
    Mux(
      use_new_req_isKeyword,
      new_req.vaddr(5).asBool,
      req.vaddr(5).asBool
    )
  }

  def isKeyword(): Bool = {
    val alloc_isKeyword = Mux(
      alloc,
      Mux(req.isFromLoad, req.vaddr(5).asBool, false.B),
      false.B
    )
    Mux(
      merge_req(req),
      merge_isKeyword(req),
      alloc_isKeyword
    )
  }
  // send out acquire as soon as possible
  // if a new store miss req is about to merge into this pipe reg, don't send acquire now
  def can_send_acquire(valid: Bool, new_req: MissReq): Bool = {
    alloc && !(valid && merge_req(new_req) && new_req.isFromStore)
  }

  def get_acquire(l2_pf_store_only: Bool): TLBundleA = {
    val acquire = Wire(new TLBundleA(edge.bundle))
    val grow_param = req.req_coh.onAccess(req.cmd)._2
    val acquireBlock = edge.AcquireBlock(
      fromSource = mshr_id,
      toAddress = get_block_addr(req.addr),
      lgSize = (log2Up(cfg.blockBytes)).U,
      growPermissions = grow_param
    )._2
    val acquirePerm = edge.AcquirePerm(
      fromSource = mshr_id,
      toAddress = get_block_addr(req.addr),
      lgSize = (log2Up(cfg.blockBytes)).U,
      growPermissions = grow_param
    )._2
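    // a store that fully overwrites the block needs no data from L2, only
    // write permission, so AcquirePerm suffices; otherwise AcquireBlock
    // fetches the data as well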
    acquire := Mux(req.full_overwrite, acquirePerm, acquireBlock)
    // resolve cache alias by L2
    acquire.user.lift(AliasKey).foreach(_ := req.vaddr(13, 12))
    // pass vaddr to l2
    acquire.user.lift(VaddrKey).foreach(_ := req.vaddr(VAddrBits - 1, blockOffBits))

    // miss req pipe reg passes the keyword to L2; this path takes priority
    acquire.echo.lift(IsKeywordKey).foreach(_ := isKeyword())

    // trigger prefetch
    acquire.user.lift(PrefetchKey).foreach(_ := Mux(l2_pf_store_only, req.isFromStore, true.B))
    // req source
    when(req.isFromLoad) {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPULoadData.id.U)
    }.elsewhen(req.isFromStore) {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUStoreData.id.U)
    }.elsewhen(req.isFromAMO) {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUAtomicData.id.U)
    }.otherwise {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.L1DataPrefetch.id.U)
    }

    acquire
  }

  def block_match(release_addr: UInt): Bool = {
    reg_valid() && get_block(req.addr) === get_block(release_addr)
  }
}

class MissEntry(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule
  with HasCircularQueuePtrHelper
 {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(hartIdLen.W))
    // MSHR ID
    val id = Input(UInt(log2Up(cfg.nMissEntries).W))
    // client requests
    // MSHR update request, MSHR state and addr will be updated when req.fire
    val req = Flipped(ValidIO(new MissReqWoStoreData))
    // pipeline reg
    val miss_req_pipe_reg = Input(new MissReqPipeRegBundle(edge))
    // allocate this entry for new req
    val primary_valid = Input(Bool())
    // this entry is free and can be allocated to new reqs
    val primary_ready = Output(Bool())
    // this entry is busy, but it can merge the new req
    val secondary_ready = Output(Bool())
    // this entry is busy and it can not merge the new req
    val secondary_reject = Output(Bool())
    // way selected for replacing, used to support plru update
    // bus
    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    // send refill info to load queue (currently unused)
    val refill_to_ldq = ValidIO(new Refill)

    // replace pipe
    val l2_hint = Input(Valid(new L2ToL1Hint())) // Hint from L2 Cache

    // main pipe: amo miss
    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Input(Bool())
    val main_pipe_refill_resp = Input(Bool())
    val main_pipe_replay = Input(Bool())

    // for main pipe s2
    val refill_info = ValidIO(new MissQueueRefillInfo)

    val block_addr = ValidIO(UInt(PAddrBits.W))

    val req_addr = ValidIO(UInt(PAddrBits.W))

    val req_handled_by_this_entry = Output(Bool())

    val forwardInfo = Output(new MissEntryForwardIO)
    val l2_pf_store_only = Input(Bool())

    // whether the pipeline reg has sent out an acquire
    val acquire_fired_by_pipe_reg = Input(Bool())
    val memSetPattenDetected = Input(Bool())

    val perf_pending_prefetch = Output(Bool())
    val perf_pending_normal   = Output(Bool())

    val rob_head_query = new DCacheBundle {
      val vaddr = Input(UInt(VAddrBits.W))
      val query_valid = Input(Bool())

      val resp = Output(Bool())

      def hit(e_vaddr: UInt): Bool = {
        require(e_vaddr.getWidth == VAddrBits)
        query_valid && vaddr(VAddrBits - 1, DCacheLineOffset) === e_vaddr(VAddrBits - 1, DCacheLineOffset)
      }
    }

    val latency_monitor = new DCacheBundle {
      val load_miss_refilling  = Output(Bool())
      val store_miss_refilling = Output(Bool())
      val amo_miss_refilling   = Output(Bool())
      val pf_miss_refilling    = Output(Bool())
    }

    val prefetch_info = new DCacheBundle {
      val late_prefetch = Output(Bool())
    }
    val nMaxPrefetchEntry = Input(UInt(64.W))
    val matched = Output(Bool())
  })

  assert(!RegNext(io.primary_valid && !io.primary_ready))

  val req = Reg(new MissReqWoStoreData)
  val req_primary_fire = Reg(new MissReqWoStoreData) // for perf use
  val req_store_mask = Reg(UInt(cfg.blockBytes.W))
  val req_valid = RegInit(false.B)
  val set = addr_to_dcache_set(req.vaddr)
  // initial keyword
  val isKeyword = RegInit(false.B)

  val miss_req_pipe_reg_bits = io.miss_req_pipe_reg.req

  val input_req_is_prefetch = isPrefetch(miss_req_pipe_reg_bits.cmd)

  val s_acquire = RegInit(true.B)
  val s_grantack = RegInit(true.B)
  val s_mainpipe_req = RegInit(true.B)

  val w_grantfirst = RegInit(true.B)
  val w_grantlast = RegInit(true.B)
  val w_mainpipe_resp = RegInit(true.B)
  val w_refill_resp = RegInit(true.B)
  val w_l2hint = RegInit(true.B)

  val mainpipe_req_fired = RegInit(true.B)

  val release_entry = s_grantack && w_mainpipe_resp && w_refill_resp

  val acquire_not_sent = !s_acquire && !io.mem_acquire.ready
  val data_not_refilled = !w_grantfirst

  val error = RegInit(false.B)
  val prefetch = RegInit(false.B)
  val access = RegInit(false.B)

  val should_refill_data_reg = Reg(Bool())
  val should_refill_data = WireInit(should_refill_data_reg)

  val should_replace = RegInit(false.B)

  val full_overwrite = Reg(Bool())

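  // edge.count tracks the beats of the (possibly multi-beat) Grant message:
  // refill_done is asserted when the last beat fires, refill_count is the
  // index of the beat currently on the D channel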
  val (_, _, refill_done, refill_count) = edge.count(io.mem_grant)
  val grant_param = Reg(UInt(TLPermissions.bdWidth.W))

  // refill data with store data, this reg will be used to store:
  // 1. store data (if needed), before l2 refill data
  // 2. the merged result of store data and l2 refill data (i.e. the new cacheline that will be written to the data array)
  val refill_and_store_data = Reg(Vec(blockRows, UInt(rowBits.W)))
  // raw data refilled to l1 by l2
  val refill_data_raw = Reg(Vec(blockBytes/beatBytes, UInt(beatBits.W)))

  // allocate current miss queue entry for a miss req
  val primary_fire = WireInit(io.req.valid && io.primary_ready && io.primary_valid && !io.req.bits.cancel)
  // merge miss req to current miss queue entry
  val secondary_fire = WireInit(io.req.valid && io.secondary_ready && !io.req.bits.cancel)

  val req_handled_by_this_entry = primary_fire || secondary_fire

  // for perf use
  val secondary_fired = RegInit(false.B)

  io.perf_pending_prefetch := req_valid && prefetch && !secondary_fired
  io.perf_pending_normal   := req_valid && (!prefetch || secondary_fired)

  io.rob_head_query.resp   := io.rob_head_query.hit(req.vaddr) && req_valid

  io.req_handled_by_this_entry := req_handled_by_this_entry

  when (release_entry && req_valid) {
    req_valid := false.B
  }

  when (io.miss_req_pipe_reg.alloc) {
    assert(RegNext(primary_fire), "after 1 cycle of primary_fire, entry will be allocated")
    req_valid := true.B

    req := miss_req_pipe_reg_bits.toMissReqWoStoreData()
    req_primary_fire := miss_req_pipe_reg_bits.toMissReqWoStoreData()
    req.addr := get_block_addr(miss_req_pipe_reg_bits.addr)
    // only load misses need the keyword
    isKeyword := Mux(miss_req_pipe_reg_bits.isFromLoad, miss_req_pipe_reg_bits.vaddr(5).asBool, false.B)

    s_acquire := io.acquire_fired_by_pipe_reg
    s_grantack := false.B
    s_mainpipe_req := false.B

    w_grantfirst := false.B
    w_grantlast := false.B
    w_l2hint := false.B
    mainpipe_req_fired := false.B

    when(miss_req_pipe_reg_bits.isFromStore) {
      req_store_mask := miss_req_pipe_reg_bits.store_mask
      for (i <- 0 until blockRows) {
        refill_and_store_data(i) := miss_req_pipe_reg_bits.store_data(rowBits * (i + 1) - 1, rowBits * i)
      }
    }
    full_overwrite := miss_req_pipe_reg_bits.isFromStore && miss_req_pipe_reg_bits.full_overwrite

    when (!miss_req_pipe_reg_bits.isFromAMO) {
      w_refill_resp := false.B
    }

    when (miss_req_pipe_reg_bits.isFromAMO) {
      w_mainpipe_resp := false.B
    }

    should_refill_data_reg := miss_req_pipe_reg_bits.isFromLoad
    error := false.B
    prefetch := input_req_is_prefetch && !io.miss_req_pipe_reg.prefetch_late_en(io.req.bits, io.req.valid)
    access := false.B
    secondary_fired := false.B
  }

  when (io.miss_req_pipe_reg.merge) {
    assert(RegNext(secondary_fire) || RegNext(RegNext(primary_fire)), "after 1 cycle of secondary_fire or 2 cycles of primary_fire, entry will be merged")
    assert(miss_req_pipe_reg_bits.req_coh.state <= req.req_coh.state || (prefetch && !access))
    assert(!(miss_req_pipe_reg_bits.isFromAMO || req.isFromAMO))
    // use the most up-to-date meta
    req.req_coh := miss_req_pipe_reg_bits.req_coh

    isKeyword := Mux(
      before_req_sent_can_merge(miss_req_pipe_reg_bits),
      before_req_sent_merge_iskeyword(miss_req_pipe_reg_bits),
      isKeyword)
    assert(!miss_req_pipe_reg_bits.isFromPrefetch, "can not merge a prefetch req, late prefetch should always be ignored!")

    when (miss_req_pipe_reg_bits.isFromStore) {
      req := miss_req_pipe_reg_bits
      req.addr := get_block_addr(miss_req_pipe_reg_bits.addr)
      req_store_mask := miss_req_pipe_reg_bits.store_mask
      for (i <- 0 until blockRows) {
        refill_and_store_data(i) := miss_req_pipe_reg_bits.store_data(rowBits * (i + 1) - 1, rowBits * i)
      }
      full_overwrite := miss_req_pipe_reg_bits.isFromStore && miss_req_pipe_reg_bits.full_overwrite
      assert(is_alias_match(req.vaddr, miss_req_pipe_reg_bits.vaddr), "alias bits should be the same when merging store")
    }

    should_refill_data := should_refill_data_reg || miss_req_pipe_reg_bits.isFromLoad
    should_refill_data_reg := should_refill_data
    when (!input_req_is_prefetch) {
      access := true.B // when merging a non-prefetch req, set the access bit
    }
    secondary_fired := true.B
  }

  when (io.mem_acquire.fire) {
    s_acquire := true.B
  }

  // merge data refilled by l2 and store data, update miss queue entry, gen refill_req
  val new_data = Wire(Vec(blockRows, UInt(rowBits.W)))
  val new_mask = Wire(Vec(blockRows, UInt(rowBytes.W)))
  // merge refilled data and store data (if needed)
  def mergePutData(old_data: UInt, new_data: UInt, wmask: UInt): UInt = {
    val full_wmask = FillInterleaved(8, wmask)
    (~full_wmask & old_data | full_wmask & new_data)
  }
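  // worked example (one byte lane): with wmask = 1, FillInterleaved(8, wmask)
  // yields 0xff, so mergePutData(0x12, 0x34, 1) returns 0x34 (store data wins);
  // with wmask = 0 the refilled byte 0x12 is kept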
  for (i <- 0 until blockRows) {
    // new_data(i) := req.store_data(rowBits * (i + 1) - 1, rowBits * i)
    new_data(i) := refill_and_store_data(i)
    // we only need to merge data for Store
    new_mask(i) := Mux(req.isFromStore, req_store_mask(rowBytes * (i + 1) - 1, rowBytes * i), 0.U)
  }

  val hasData = RegInit(true.B)
  val isDirty = RegInit(false.B)
  when (io.mem_grant.fire) {
    w_grantfirst := true.B
    grant_param := io.mem_grant.bits.param
    when (edge.hasData(io.mem_grant.bits)) {
      // GrantData
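      // place each beat's rows into refill_and_store_data in block order.
      // As an illustration, assuming the typical config (64B block, 32B beat,
      // 8B row, i.e. blockRows = 8, beatRows = 4): idx = refill_count * 4 + i,
      // and under critical-half-first refill (isKeyword) the `^ 4.U` flips
      // bit 2 of idx, so the half that L2 returns first (the requested upper
      // half) lands in rows 4..7; refill_data_raw below is likewise indexed
      // with refill_count ^ isKeyword to restore block order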
      when (isKeyword) {
        for (i <- 0 until beatRows) {
          val idx = ((refill_count << log2Floor(beatRows)) + i.U) ^ 4.U
          val grant_row = io.mem_grant.bits.data(rowBits * (i + 1) - 1, rowBits * i)
          refill_and_store_data(idx) := mergePutData(grant_row, new_data(idx), new_mask(idx))
        }
      }.otherwise {
        for (i <- 0 until beatRows) {
          val idx = (refill_count << log2Floor(beatRows)) + i.U
          val grant_row = io.mem_grant.bits.data(rowBits * (i + 1) - 1, rowBits * i)
          refill_and_store_data(idx) := mergePutData(grant_row, new_data(idx), new_mask(idx))
        }
      }
      w_grantlast := w_grantlast || refill_done
      hasData := true.B
    }.otherwise {
      // Grant
      assert(full_overwrite)
      for (i <- 0 until blockRows) {
        refill_and_store_data(i) := new_data(i)
      }
      w_grantlast := true.B
      hasData := false.B
    }

    error := io.mem_grant.bits.denied || io.mem_grant.bits.corrupt || error

    refill_data_raw(refill_count ^ isKeyword) := io.mem_grant.bits.data
    isDirty := io.mem_grant.bits.echo.lift(DirtyKey).getOrElse(false.B)
  }

  when (io.mem_finish.fire) {
    s_grantack := true.B
  }

  when (io.main_pipe_req.fire) {
    s_mainpipe_req := true.B
    mainpipe_req_fired := true.B
  }

  when (io.main_pipe_replay) {
    s_mainpipe_req := false.B
  }

  when (io.main_pipe_resp) {
    w_mainpipe_resp := true.B
  }

  when(io.main_pipe_refill_resp) {
    w_refill_resp := true.B
  }

  when (io.l2_hint.valid) {
    w_l2hint := true.B
  }

  def before_req_sent_can_merge(new_req: MissReqWoStoreData): Bool = {
    acquire_not_sent && (req.isFromLoad || req.isFromPrefetch) && (new_req.isFromLoad || new_req.isFromStore)
  }

  def before_data_refill_can_merge(new_req: MissReqWoStoreData): Bool = {
    data_not_refilled && (req.isFromLoad || req.isFromStore || req.isFromPrefetch) && new_req.isFromLoad
  }

  // Note that late prefetch will be ignored

  def should_merge(new_req: MissReqWoStoreData): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)
    block_match && alias_match &&
    (
      before_req_sent_can_merge(new_req) ||
      before_data_refill_can_merge(new_req)
    )
  }

  def before_req_sent_merge_iskeyword(new_req: MissReqWoStoreData): Bool = {
    val need_check_isKeyword = acquire_not_sent && req.isFromLoad && new_req.isFromLoad && should_merge(new_req)
    val use_new_req_isKeyword = isAfter(req.lqIdx, new_req.lqIdx)
    Mux(
      need_check_isKeyword,
      Mux(
        use_new_req_isKeyword,
        new_req.vaddr(5).asBool,
        req.vaddr(5).asBool
      ),
      isKeyword
    )
  }

  // a store can be merged before io.mem_acquire fires
  // a store cannot be merged in the same cycle that io.mem_acquire fires
  // a load can be merged before io.mem_grant fires
  //
  // TODO: merge store if possible? mem_acquire may need to be re-issued,
  // but sbuffer entry can be freed
  def should_reject(new_req: MissReqWoStoreData): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val set_match = set === addr_to_dcache_set(new_req.vaddr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)

    req_valid && Mux(
        block_match,
        (!before_req_sent_can_merge(new_req) && !before_data_refill_can_merge(new_req)) || !alias_match,
        false.B
      )
  }

  // req_valid will be updated 1 cycle after primary_fire, so next cycle, this entry cannot accept a new req
  when(GatedValidRegNext(io.id >= ((cfg.nMissEntries).U - io.nMaxPrefetchEntry))) {
    // can accept prefetch req
    io.primary_ready := !req_valid && !GatedValidRegNext(primary_fire)
  }.otherwise {
    // cannot accept prefetch req except when a memset pattern is detected
    io.primary_ready := !req_valid && (!io.req.bits.isFromPrefetch || io.memSetPattenDetected) && !GatedValidRegNext(primary_fire)
  }
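  // entry partitioning: only the last nMaxPrefetchEntry entries (id >=
  // nMissEntries - nMaxPrefetchEntry) accept prefetch misses unconditionally;
  // the other entries are reserved for demand misses, unless a memset pattern
  // has been detected, in which case prefetches may allocate anywhere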
  io.secondary_ready := should_merge(io.req.bits)
  io.secondary_reject := should_reject(io.req.bits)

  // should not allocate, merge or reject at the same time
  assert(RegNext(PopCount(Seq(io.primary_ready, io.secondary_ready, io.secondary_reject)) <= 1.U || !io.req.valid))

  val refill_data_splited = WireInit(VecInit(Seq.tabulate(cfg.blockBytes * 8 / l1BusDataWidth)(i => {
    val data = refill_and_store_data.asUInt
    data((i + 1) * l1BusDataWidth - 1, i * l1BusDataWidth)
  })))
  // when all granted data is ready, wake up lq's missed load
  val refill_to_ldq_en = !w_grantlast && io.mem_grant.fire
  io.refill_to_ldq.valid := GatedValidRegNext(refill_to_ldq_en)
  io.refill_to_ldq.bits.addr := RegEnable(req.addr + ((refill_count ^ isKeyword) << refillOffBits), refill_to_ldq_en)
  io.refill_to_ldq.bits.data := refill_data_splited(RegEnable(refill_count ^ isKeyword, refill_to_ldq_en))
  io.refill_to_ldq.bits.error := RegEnable(io.mem_grant.bits.corrupt || io.mem_grant.bits.denied, refill_to_ldq_en)
  io.refill_to_ldq.bits.refill_done := RegEnable(refill_done && io.mem_grant.fire, refill_to_ldq_en)
  io.refill_to_ldq.bits.hasdata := hasData
  io.refill_to_ldq.bits.data_raw := refill_data_raw.asUInt
  io.refill_to_ldq.bits.id := io.id

  // if the entry has a pending merge req, wait for it
  // Note: for now, this only waits for stores, because a store may acquire permission T
  io.mem_acquire.valid := !s_acquire && !(io.miss_req_pipe_reg.merge && miss_req_pipe_reg_bits.isFromStore)
  val grow_param = req.req_coh.onAccess(req.cmd)._2
  val acquireBlock = edge.AcquireBlock(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  val acquirePerm = edge.AcquirePerm(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  io.mem_acquire.bits := Mux(full_overwrite, acquirePerm, acquireBlock)
  // resolve cache alias by L2
  io.mem_acquire.bits.user.lift(AliasKey).foreach( _ := req.vaddr(13, 12))
  // pass vaddr to l2
  io.mem_acquire.bits.user.lift(VaddrKey).foreach( _ := req.vaddr(VAddrBits-1, blockOffBits))
  // pass keyword to L2
  io.mem_acquire.bits.echo.lift(IsKeywordKey).foreach(_ := isKeyword)
  // trigger prefetch
  io.mem_acquire.bits.user.lift(PrefetchKey).foreach(_ := Mux(io.l2_pf_store_only, req.isFromStore, true.B))
  // req source
  when(prefetch && !secondary_fired) {
    io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.L1DataPrefetch.id.U)
  }.otherwise {
    when(req.isFromStore) {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUStoreData.id.U)
    }.elsewhen(req.isFromLoad) {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPULoadData.id.U)
    }.elsewhen(req.isFromAMO) {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUAtomicData.id.U)
    }.otherwise {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.L1DataPrefetch.id.U)
    }
  }
  require(nSets <= 256)

  // io.mem_grant.ready := !w_grantlast && s_acquire
  io.mem_grant.ready := true.B
  assert(!(io.mem_grant.valid && !(!w_grantlast && s_acquire)), "dcache should always be ready for mem_grant now")

  val grantack = RegEnable(edge.GrantAck(io.mem_grant.bits), io.mem_grant.fire)
  assert(RegNext(!io.mem_grant.fire || edge.isRequest(io.mem_grant.bits)))
  io.mem_finish.valid := !s_grantack && w_grantfirst
  io.mem_finish.bits := grantack

  // send mainpipe_req when a hint is received from L2, or when data is received without a hint
  io.main_pipe_req.valid := !s_mainpipe_req && (w_l2hint || w_grantlast)
  io.main_pipe_req.bits := DontCare
  io.main_pipe_req.bits.miss := true.B
  io.main_pipe_req.bits.miss_id := io.id
  io.main_pipe_req.bits.probe := false.B
  io.main_pipe_req.bits.source := req.source
  io.main_pipe_req.bits.cmd := req.cmd
  io.main_pipe_req.bits.vaddr := req.vaddr
  io.main_pipe_req.bits.addr := req.addr
  io.main_pipe_req.bits.word_idx := req.word_idx
  io.main_pipe_req.bits.amo_data := req.amo_data
  io.main_pipe_req.bits.amo_mask := req.amo_mask
  io.main_pipe_req.bits.id := req.id
  io.main_pipe_req.bits.pf_source := req.pf_source
  io.main_pipe_req.bits.access := access

  io.block_addr.valid := req_valid && w_grantlast
  io.block_addr.bits := req.addr

  io.req_addr.valid := req_valid
  io.req_addr.bits := req.addr

  io.refill_info.valid := req_valid && w_grantlast
  io.refill_info.bits.store_data := refill_and_store_data.asUInt
  io.refill_info.bits.store_mask := ~0.U(blockBytes.W)
  io.refill_info.bits.miss_param := grant_param
  io.refill_info.bits.miss_dirty := isDirty
  io.refill_info.bits.error      := error

  XSPerfAccumulate("miss_refill_mainpipe_req", io.main_pipe_req.fire)
  XSPerfAccumulate("miss_refill_without_hint", io.main_pipe_req.fire && !mainpipe_req_fired && !w_l2hint)
  XSPerfAccumulate("miss_refill_replay", io.main_pipe_replay)

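  // w_grantfirst / w_grantlast tell the forwarding logic which half of the
  // block is already valid; under critical-half-first refill (isKeyword) the
  // halves arrive in swapped order, so the two flags are swapped here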
  val w_grantfirst_forward_info = Mux(isKeyword, w_grantlast, w_grantfirst)
  val w_grantlast_forward_info = Mux(isKeyword, w_grantfirst, w_grantlast)
  io.forwardInfo.apply(req_valid, req.addr, refill_and_store_data, w_grantfirst_forward_info, w_grantlast_forward_info)

  io.matched := req_valid && (get_block(req.addr) === get_block(io.req.bits.addr)) && !prefetch
  io.prefetch_info.late_prefetch := io.req.valid && !(io.req.bits.isFromPrefetch) && req_valid && (get_block(req.addr) === get_block(io.req.bits.addr)) && prefetch

  when(io.prefetch_info.late_prefetch) {
    prefetch := false.B
  }

  // refill latency monitor
  val start_counting = GatedValidRegNext(io.mem_acquire.fire) || (GatedValidRegNextN(primary_fire, 2) && s_acquire)
  io.latency_monitor.load_miss_refilling  := req_valid && req_primary_fire.isFromLoad     && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)
  io.latency_monitor.store_miss_refilling := req_valid && req_primary_fire.isFromStore    && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)
  io.latency_monitor.amo_miss_refilling   := req_valid && req_primary_fire.isFromAMO      && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)
  io.latency_monitor.pf_miss_refilling    := req_valid && req_primary_fire.isFromPrefetch && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)

  XSPerfAccumulate("miss_req_primary", primary_fire)
  XSPerfAccumulate("miss_req_merged", secondary_fire)
  XSPerfAccumulate("load_miss_penalty_to_use",
    should_refill_data &&
      BoolStopWatch(primary_fire, io.refill_to_ldq.valid, true)
  )
  XSPerfAccumulate("penalty_between_grantlast_and_release",
    BoolStopWatch(!RegNext(w_grantlast) && w_grantlast, release_entry, true)
  )
  XSPerfAccumulate("main_pipe_penalty", BoolStopWatch(io.main_pipe_req.fire, io.main_pipe_resp))
  XSPerfAccumulate("penalty_blocked_by_channel_A", io.mem_acquire.valid && !io.mem_acquire.ready)
  XSPerfAccumulate("penalty_waiting_for_channel_D", s_acquire && !w_grantlast && !io.mem_grant.valid)
  XSPerfAccumulate("penalty_waiting_for_channel_E", io.mem_finish.valid && !io.mem_finish.ready)
  XSPerfAccumulate("prefetch_req_primary", primary_fire && io.req.bits.source === DCACHE_PREFETCH_SOURCE.U)
  XSPerfAccumulate("prefetch_req_merged", secondary_fire && io.req.bits.source === DCACHE_PREFETCH_SOURCE.U)
  XSPerfAccumulate("can_not_send_acquire_because_of_merging_store", !s_acquire && io.miss_req_pipe_reg.merge && miss_req_pipe_reg_bits.isFromStore)

  val (mshr_penalty_sample, mshr_penalty) = TransactionLatencyCounter(GatedValidRegNextN(primary_fire, 2), release_entry)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 20, 100, 10, true, false)

  val load_miss_begin = primary_fire && io.req.bits.isFromLoad
  val refill_finished = GatedValidRegNext(!w_grantlast && refill_done) && should_refill_data
  val (load_miss_penalty_sample, load_miss_penalty) = TransactionLatencyCounter(load_miss_begin, refill_finished) // not real refill finish time
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 20, 100, 10, true, false)

  val (a_to_d_penalty_sample, a_to_d_penalty) = TransactionLatencyCounter(start_counting, GatedValidRegNext(io.mem_grant.fire && refill_done))
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 20, 100, 10, true, false)
}

class MissQueue(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule
  with HasPerfEvents
  {
  val io = IO(new Bundle {
    val hartId = Input(UInt(hartIdLen.W))
    val req = Flipped(DecoupledIO(new MissReq))
    val resp = Output(new MissResp)
    val refill_to_ldq = ValidIO(new Refill)

    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    val l2_hint = Input(Valid(new L2ToL1Hint())) // Hint from L2 Cache

    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Flipped(ValidIO(new MainPipeResp))

    val mainpipe_info = Input(new MainPipeInfoToMQ)
    val refill_info = ValidIO(new MissQueueRefillInfo)

    // block probe
    val probe_addr = Input(UInt(PAddrBits.W))
    val probe_block = Output(Bool())

    // block release
    val release_addr = Flipped(ValidIO(UInt(PAddrBits.W)))
    val release_block = Output(Bool())

    val full = Output(Bool())

    // forward missqueue
    val forward = Vec(LoadPipelineWidth, new LduToMissqueueForwardIO)
    val l2_pf_store_only = Input(Bool())

    val memSetPattenDetected = Output(Bool())
    val lqEmpty = Input(Bool())

    val prefetch_info = new Bundle {
      val naive = new Bundle {
        val late_miss_prefetch = Output(Bool())
      }

      val fdp = new Bundle {
        val late_miss_prefetch = Output(Bool())
        val prefetch_monitor_cnt = Output(Bool())
        val total_prefetch = Output(Bool())
      }
    }

    val mq_enq_cancel = Output(Bool())

    val debugTopDown = new DCacheTopDownIO
  })

  // 128KBL1: FIXME: provide vaddr for l2

  val entries = Seq.fill(cfg.nMissEntries)(Module(new MissEntry(edge)))

  val miss_req_pipe_reg = RegInit(0.U.asTypeOf(new MissReqPipeRegBundle(edge)))
  val acquire_from_pipereg = Wire(chiselTypeOf(io.mem_acquire))

  val primary_ready_vec = entries.map(_.io.primary_ready)
  val secondary_ready_vec = entries.map(_.io.secondary_ready)
  val secondary_reject_vec = entries.map(_.io.secondary_reject)
  val probe_block_vec = entries.map { case e => e.io.block_addr.valid && e.io.block_addr.bits === io.probe_addr }

  val merge = ParallelORR(Cat(secondary_ready_vec ++ Seq(miss_req_pipe_reg.merge_req(io.req.bits))))
  val reject = ParallelORR(Cat(secondary_reject_vec ++ Seq(miss_req_pipe_reg.reject_req(io.req.bits))))
  val alloc = !reject && !merge && ParallelORR(Cat(primary_ready_vec))
  val accept = alloc || merge

  val req_mshr_handled_vec = entries.map(_.io.req_handled_by_this_entry)
  // merged to pipeline reg
  val req_pipeline_reg_handled = miss_req_pipe_reg.merge_req(io.req.bits) && io.req.valid
  assert(PopCount(Seq(req_pipeline_reg_handled, VecInit(req_mshr_handled_vec).asUInt.orR)) <= 1.U, "miss req will either go to mshr or pipeline reg")
  assert(PopCount(req_mshr_handled_vec) <= 1.U, "Only one mshr can handle a req")
  io.resp.id := Mux(!req_pipeline_reg_handled, OHToUInt(req_mshr_handled_vec), miss_req_pipe_reg.mshr_id)
  io.resp.handled := Cat(req_mshr_handled_vec).orR || req_pipeline_reg_handled
  io.resp.merged := merge

  // MissQueue enq logic is now split into 2 cycles
  when(io.req.valid){
    miss_req_pipe_reg.req     := io.req.bits
  }
  // miss_req_pipe_reg.req     := io.req.bits
  miss_req_pipe_reg.alloc   := alloc && io.req.valid && !io.req.bits.cancel
  miss_req_pipe_reg.merge   := merge && io.req.valid && !io.req.bits.cancel
  miss_req_pipe_reg.mshr_id := io.resp.id

  assert(PopCount(Seq(alloc && io.req.valid, merge && io.req.valid)) <= 1.U, "allocate and merge a mshr in same cycle!")

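  // memset pattern detection: count consecutive handled misses that are not
  // loads (a load resets the counter, a store increments it, other sources
  // leave it unchanged); once the count reaches Threshold while the load queue
  // is empty, assume a memset-like stream and let prefetches use all entries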
  val source_except_load_cnt = RegInit(0.U(10.W))
  when(VecInit(req_mshr_handled_vec).asUInt.orR || req_pipeline_reg_handled) {
    when(io.req.bits.isFromLoad) {
      source_except_load_cnt := 0.U
    }.otherwise {
      when(io.req.bits.isFromStore) {
        source_except_load_cnt := source_except_load_cnt + 1.U
      }
    }
  }
  val Threshold = 8
  val memSetPattenDetected = GatedValidRegNext((source_except_load_cnt >= Threshold.U) && io.lqEmpty)

  io.memSetPattenDetected := memSetPattenDetected

  val forwardInfo_vec = VecInit(entries.map(_.io.forwardInfo))
  (0 until LoadPipelineWidth).map(i => {
    val id = io.forward(i).mshrid
    val req_valid = io.forward(i).valid
    val paddr = io.forward(i).paddr

    val (forward_mshr, forwardData) = forwardInfo_vec(id).forward(req_valid, paddr)
    io.forward(i).forward_result_valid := forwardInfo_vec(id).check(req_valid, paddr)
    io.forward(i).forward_mshr := forward_mshr
    io.forward(i).forwardData := forwardData
  })

  assert(RegNext(PopCount(secondary_ready_vec) <= 1.U || !io.req.valid))
//  assert(RegNext(PopCount(secondary_reject_vec) <= 1.U))
  // It is possible that one mshr wants to merge a req, while another mshr wants to reject it.
  // That is, an incoming req has the same paddr as that of mshr_0 (merge),
  // while it has the same set and the same way as mshr_1 (reject).
  // In this situation, the incoming req should be merged by mshr_0
//  assert(RegNext(PopCount(Seq(merge, reject)) <= 1.U))

  def select_valid_one[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {

    if (name.nonEmpty) { out.suggestName(s"${name.get}_select") }
    out.valid := Cat(in.map(_.valid)).orR
    out.bits := ParallelMux(in.map(_.valid) zip in.map(_.bits))
    in.map(_.ready := out.ready)
    assert(!RegNext(out.valid && PopCount(Cat(in.map(_.valid))) > 1.U))
  }

  io.mem_grant.ready := false.B

  val nMaxPrefetchEntry = Constantin.createRecord(s"nMaxPrefetchEntry${p(XSCoreParamsKey).HartId}", initValue = 14)
  entries.zipWithIndex.foreach {
    case (e, i) =>
      val former_primary_ready = if(i == 0)
        false.B
      else
        Cat((0 until i).map(j => entries(j).io.primary_ready)).orR

      e.io.hartId := io.hartId
      e.io.id := i.U
      e.io.l2_pf_store_only := io.l2_pf_store_only
      e.io.req.valid := io.req.valid
      e.io.primary_valid := io.req.valid &&
        !merge &&
        !reject &&
        !former_primary_ready &&
        e.io.primary_ready
      e.io.req.bits := io.req.bits.toMissReqWoStoreData()

      e.io.mem_grant.valid := false.B
      e.io.mem_grant.bits := DontCare
      when (io.mem_grant.bits.source === i.U) {
        e.io.mem_grant <> io.mem_grant
      }

      when(miss_req_pipe_reg.reg_valid() && miss_req_pipe_reg.mshr_id === i.U) {
        e.io.miss_req_pipe_reg := miss_req_pipe_reg
      }.otherwise {
        e.io.miss_req_pipe_reg       := DontCare
        e.io.miss_req_pipe_reg.merge := false.B
        e.io.miss_req_pipe_reg.alloc := false.B
      }

      e.io.acquire_fired_by_pipe_reg := acquire_from_pipereg.fire

      e.io.main_pipe_resp := io.main_pipe_resp.valid && io.main_pipe_resp.bits.ack_miss_queue && io.main_pipe_resp.bits.miss_id === i.U
      e.io.main_pipe_replay := io.mainpipe_info.s2_valid && io.mainpipe_info.s2_replay_to_mq && io.mainpipe_info.s2_miss_id === i.U
      e.io.main_pipe_refill_resp := io.mainpipe_info.s3_valid && io.mainpipe_info.s3_refill_resp && io.mainpipe_info.s3_miss_id === i.U

      e.io.memSetPattenDetected := memSetPattenDetected
      e.io.nMaxPrefetchEntry := nMaxPrefetchEntry

      e.io.main_pipe_req.ready := io.main_pipe_req.ready

      when(io.l2_hint.bits.sourceId === i.U) {
        e.io.l2_hint <> io.l2_hint
      } .otherwise {
        e.io.l2_hint.valid := false.B
        e.io.l2_hint.bits := DontCare
      }
  }

  io.req.ready := accept
  io.mq_enq_cancel := io.req.bits.cancel
  io.refill_to_ldq.valid := Cat(entries.map(_.io.refill_to_ldq.valid)).orR
  io.refill_to_ldq.bits := ParallelMux(entries.map(_.io.refill_to_ldq.valid) zip entries.map(_.io.refill_to_ldq.bits))

  io.refill_info.valid := VecInit(entries.zipWithIndex.map{ case(e,i) => e.io.refill_info.valid && io.mainpipe_info.s2_valid && io.mainpipe_info.s2_miss_id === i.U}).asUInt.orR
  io.refill_info.bits := Mux1H(entries.zipWithIndex.map{ case(e,i) => (io.mainpipe_info.s2_miss_id === i.U) -> e.io.refill_info.bits })

  acquire_from_pipereg.valid := miss_req_pipe_reg.can_send_acquire(io.req.valid, io.req.bits)
  acquire_from_pipereg.bits := miss_req_pipe_reg.get_acquire(io.l2_pf_store_only)

  XSPerfAccumulate("acquire_fire_from_pipereg", acquire_from_pipereg.fire)
  XSPerfAccumulate("pipereg_valid", miss_req_pipe_reg.reg_valid())

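  // TLArbiter.lowest favors the lowest-indexed valid source, so an acquire
  // from the enq pipeline reg takes priority over the per-entry acquires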
  val acquire_sources = Seq(acquire_from_pipereg) ++ entries.map(_.io.mem_acquire)
  TLArbiter.lowest(edge, io.mem_acquire, acquire_sources:_*)
  TLArbiter.lowest(edge, io.mem_finish, entries.map(_.io.mem_finish):_*)

  // amo's main pipe req out
  fastArbiter(entries.map(_.io.main_pipe_req), io.main_pipe_req, Some("main_pipe_req"))

  io.probe_block := Cat(probe_block_vec).orR

  io.release_block := io.release_addr.valid && Cat(entries.map(e => e.io.req_addr.valid && e.io.req_addr.bits === io.release_addr.bits) ++ Seq(miss_req_pipe_reg.block_match(io.release_addr.bits))).orR

  io.full := ~Cat(entries.map(_.io.primary_ready)).andR
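  // note: io.full is driven again in the perf-count section below
  // (io.full := num_valids === cfg.nMissEntries.U); by Chisel last-connect
  // semantics, that later assignment is the one that takes effect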

  // prefetch related
  io.prefetch_info.naive.late_miss_prefetch := io.req.valid && io.req.bits.isPrefetchRead && (miss_req_pipe_reg.matched(io.req.bits) || Cat(entries.map(_.io.matched)).orR)

  io.prefetch_info.fdp.late_miss_prefetch := (miss_req_pipe_reg.prefetch_late_en(io.req.bits.toMissReqWoStoreData(), io.req.valid) || Cat(entries.map(_.io.prefetch_info.late_prefetch)).orR)
  io.prefetch_info.fdp.prefetch_monitor_cnt := io.main_pipe_req.fire
  io.prefetch_info.fdp.total_prefetch := alloc && io.req.valid && !io.req.bits.cancel && isFromL1Prefetch(io.req.bits.pf_source)

  // L1MissTrace Chisel DB
  val debug_miss_trace = Wire(new L1MissTrace)
  debug_miss_trace.vaddr := io.req.bits.vaddr
  debug_miss_trace.paddr := io.req.bits.addr
  debug_miss_trace.source := io.req.bits.source
  debug_miss_trace.pc := io.req.bits.pc

  val isWriteL1MissQMissTable = Constantin.createRecord(s"isWriteL1MissQMissTable${p(XSCoreParamsKey).HartId}")
  val table = ChiselDB.createTable(s"L1MissQMissTrace_hart${p(XSCoreParamsKey).HartId}", new L1MissTrace)
  table.log(debug_miss_trace, isWriteL1MissQMissTable.orR && io.req.valid && !io.req.bits.cancel && alloc, "MissQueue", clock, reset)

  // Difftest
  if (env.EnableDifftest) {
    val difftest = DifftestModule(new DiffRefillEvent, dontCare = true)
    difftest.coreid := io.hartId
    difftest.index := 1.U
    difftest.valid := io.refill_to_ldq.valid && io.refill_to_ldq.bits.hasdata && io.refill_to_ldq.bits.refill_done
    difftest.addr := io.refill_to_ldq.bits.addr
    difftest.data := io.refill_to_ldq.bits.data_raw.asTypeOf(difftest.data)
    difftest.idtfr := DontCare
  }

  // Perf count
  XSPerfAccumulate("miss_req", io.req.fire && !io.req.bits.cancel)
  XSPerfAccumulate("miss_req_allocate", io.req.fire && !io.req.bits.cancel && alloc)
  XSPerfAccumulate("miss_req_load_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromLoad)
  XSPerfAccumulate("miss_req_store_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromStore)
  XSPerfAccumulate("miss_req_amo_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromAMO)
  XSPerfAccumulate("miss_req_prefetch_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromPrefetch)
  XSPerfAccumulate("miss_req_merge_load", io.req.fire && !io.req.bits.cancel && merge && io.req.bits.isFromLoad)
  XSPerfAccumulate("miss_req_reject_load", io.req.valid && !io.req.bits.cancel && reject && io.req.bits.isFromLoad)
  XSPerfAccumulate("probe_blocked_by_miss", io.probe_block)
  XSPerfAccumulate("prefetch_primary_fire", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromPrefetch)
  XSPerfAccumulate("prefetch_secondary_fire", io.req.fire && !io.req.bits.cancel && merge && io.req.bits.isFromPrefetch)
  XSPerfAccumulate("memSetPattenDetected", memSetPattenDetected)
  val max_inflight = RegInit(0.U((log2Up(cfg.nMissEntries) + 1).W))
  val num_valids = PopCount(~Cat(primary_ready_vec).asUInt)
  when (num_valids > max_inflight) {
    max_inflight := num_valids
  }
  // max inflight (average) = max_inflight_total / cycle cnt
  XSPerfAccumulate("max_inflight", max_inflight)
  QueuePerf(cfg.nMissEntries, num_valids, num_valids === cfg.nMissEntries.U)
  io.full := num_valids === cfg.nMissEntries.U
  XSPerfHistogram("num_valids", num_valids, true.B, 0, cfg.nMissEntries, 1)

  XSPerfHistogram("L1DMLP_CPUData", PopCount(VecInit(entries.map(_.io.perf_pending_normal)).asUInt), true.B, 0, cfg.nMissEntries, 1)
  XSPerfHistogram("L1DMLP_Prefetch", PopCount(VecInit(entries.map(_.io.perf_pending_prefetch)).asUInt), true.B, 0, cfg.nMissEntries, 1)
  XSPerfHistogram("L1DMLP_Total", num_valids, true.B, 0, cfg.nMissEntries, 1)

  XSPerfAccumulate("miss_load_refill_latency", PopCount(entries.map(_.io.latency_monitor.load_miss_refilling)))
  XSPerfAccumulate("miss_store_refill_latency", PopCount(entries.map(_.io.latency_monitor.store_miss_refilling)))
  XSPerfAccumulate("miss_amo_refill_latency", PopCount(entries.map(_.io.latency_monitor.amo_miss_refilling)))
  XSPerfAccumulate("miss_pf_refill_latency", PopCount(entries.map(_.io.latency_monitor.pf_miss_refilling)))

  val rob_head_miss_in_dcache = VecInit(entries.map(_.io.rob_head_query.resp)).asUInt.orR

  entries.foreach {
    case e => {
      e.io.rob_head_query.query_valid := io.debugTopDown.robHeadVaddr.valid
      e.io.rob_head_query.vaddr := io.debugTopDown.robHeadVaddr.bits
    }
  }

  io.debugTopDown.robHeadMissInDCache := rob_head_miss_in_dcache

  val perfValidCount = RegNext(PopCount(entries.map(entry => (!entry.io.primary_ready))))
  val perfEvents = Seq(
    ("dcache_missq_req      ", io.req.fire),
    ("dcache_missq_1_4_valid", (perfValidCount < (cfg.nMissEntries.U/4.U))),
    ("dcache_missq_2_4_valid", (perfValidCount > (cfg.nMissEntries.U/4.U)) & (perfValidCount <= (cfg.nMissEntries.U/2.U))),
    ("dcache_missq_3_4_valid", (perfValidCount > (cfg.nMissEntries.U/2.U)) & (perfValidCount <= (cfg.nMissEntries.U*3.U/4.U))),
    ("dcache_missq_4_4_valid", (perfValidCount > (cfg.nMissEntries.U*3.U/4.U))),
  )
  generatePerfEvent()
}