/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
*
*
* Acknowledgement
*
* This implementation is inspired by several key papers:
* [1] David Kroft. "[Lockup-free instruction fetch/prefetch cache organization.]
* (https://dl.acm.org/doi/10.5555/800052.801868)" 8th Annual Symposium on Computer Architecture (ISCA). 1981.
***************************************************************************************/

package xiangshan.cache

import chisel3._
import chisel3.util._
import coupledL2.VaddrKey
import coupledL2.IsKeywordKey
import difftest._
import freechips.rocketchip.tilelink.ClientStates._
import freechips.rocketchip.tilelink.MemoryOpCategories._
import freechips.rocketchip.tilelink.TLPermissions._
import freechips.rocketchip.tilelink.TLMessages._
import freechips.rocketchip.tilelink._
import huancun.{AliasKey, DirtyKey, PrefetchKey}
import org.chipsalliance.cde.config.Parameters
import utility._
import utils._
import xiangshan._
import xiangshan.mem.AddPipelineReg
import xiangshan.mem.prefetch._
import xiangshan.mem.trace._
import xiangshan.mem.LqPtr

class MissReqWoStoreData(implicit p: Parameters) extends DCacheBundle {
  val source = UInt(sourceTypeWidth.W)
  val pf_source = UInt(L1PfSourceBits.W)
  val cmd = UInt(M_SZ.W)
  val addr = UInt(PAddrBits.W)
  val vaddr = UInt(VAddrBits.W)
  val pc = UInt(VAddrBits.W)

  val lqIdx = new LqPtr
  // store
  val full_overwrite = Bool()

  // which word does amo work on?
  val word_idx = UInt(log2Up(blockWords).W)
  val amo_data = UInt(DataBits.W)
  val amo_mask = UInt((DataBits / 8).W)

  val req_coh = new ClientMetadata
  val id = UInt(reqIdWidth.W)

  // For now, a miss queue entry req is actually valid when req.valid && !cancel
  // * req.valid is fast to generate
  // * cancel is slow to generate, so it is not used until the last moment
  //
  // cancel may come from the following sources:
  // 1. miss req blocked by writeback queue:
  //      a writeback req of the same address is in progress
  // 2. pmp check failed
  val cancel = Bool() // cancel is slow to generate, it will cancel missreq.valid

  // Req source decode
  // Note that req source is NOT the cmd type
  // For instance, a req which isFromPrefetch may have an R or W cmd
  def isFromLoad = source === LOAD_SOURCE.U
  def isFromStore = source === STORE_SOURCE.U
  def isFromAMO = source === AMO_SOURCE.U
  def isFromPrefetch = source >= DCACHE_PREFETCH_SOURCE.U
  def isPrefetchWrite = source === DCACHE_PREFETCH_SOURCE.U && cmd === MemoryOpConstants.M_PFW
  def isPrefetchRead = source === DCACHE_PREFETCH_SOURCE.U && cmd === MemoryOpConstants.M_PFR
  def hit = req_coh.isValid()
}

class MissReqStoreData(implicit p: Parameters) extends DCacheBundle {
  // store data and store mask will be written to miss queue entry
  // 1 cycle after req.fire() and meta write
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)
}

class MissQueueRefillInfo(implicit p: Parameters) extends MissReqStoreData {
  // refill_info for mainpipe req wakeup
  val miss_param = UInt(TLPermissions.bdWidth.W)
  val miss_dirty = Bool()
  val error      = Bool()
}

class MissReq(implicit p: Parameters) extends MissReqWoStoreData {
  // store data and store mask will be written to miss queue entry
  // 1 cycle after req.fire() and meta write
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)

  def toMissReqStoreData(): MissReqStoreData = {
    val out = Wire(new MissReqStoreData)
    out.store_data := store_data
    out.store_mask := store_mask
    out
  }

  def toMissReqWoStoreData(): MissReqWoStoreData = {
    val out = Wire(new MissReqWoStoreData)
    out.source := source
    out.pf_source := pf_source
    out.cmd := cmd
    out.addr := addr
    out.vaddr := vaddr
    out.full_overwrite := full_overwrite
    out.word_idx := word_idx
    out.amo_data := amo_data
    out.amo_mask := amo_mask
    out.req_coh := req_coh
    out.id := id
    out.cancel := cancel
    out.pc := pc
    out.lqIdx := lqIdx
    out
  }
}

class MissResp(implicit p: Parameters) extends DCacheBundle {
  val id = UInt(log2Up(cfg.nMissEntries).W)
  // cache miss request is handled by miss queue, either merged or newly allocated
  val handled = Bool()
  // cache req missed, merged into one of miss queue entries
  // i.e. !miss_merged means this access is the first miss for this cacheline
  val merged = Bool()
}


/**
  * miss queue enq logic: enq is now split into 2 cycles
  *  +---------------------------------------------------------------------+    pipeline reg  +-------------------------+
  *  +         s0: enq source arbiter, judge mshr alloc or merge           +     +-------+    + s1: real alloc or merge +
  *  +                      +-----+          primary_fire?       ->        +     | alloc |    +                         +
  *  + mainpipe  -> req0 -> |     |          secondary_fire?     ->        +     | merge |    +                         +
  *  + loadpipe0 -> req1 -> | arb | -> req                       ->        +  -> | req   | -> +                         +
  *  + loadpipe1 -> req2 -> |     |          mshr id             ->        +     | id    |    +                         +
  *  +                      +-----+                                        +     +-------+    +                         +
  *  +---------------------------------------------------------------------+                  +-------------------------+
  */
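// Note on the 2-cycle enq flow sketched above (annotation, not original source text): in s0
// the winning req is compared against all MSHRs and the enq pipeline reg to decide between
// alloc and merge, and that decision plus the chosen mshr id are latched into the pipeline
// reg; the real MSHR update happens in s1. Roughly, for a req arriving in cycle T:
//   T  : arbitration, alloc/merge judgement (combinational)
//   T+1: the pipeline reg drives the selected MissEntry and may already send the acquire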

// a pipeline reg between MissReq and MissEntry
class MissReqPipeRegBundle(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheBundle
 with HasCircularQueuePtrHelper
 {
  val req           = new MissReq
  // this request is about to merge to an existing mshr
  val merge         = Bool()
  // this request is about to allocate a new mshr
  val alloc         = Bool()
  val cancel        = Bool()
  val mshr_id       = UInt(log2Up(cfg.nMissEntries).W)

  def reg_valid(): Bool = {
    (merge || alloc)
  }

  def matched(new_req: MissReq): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    block_match && reg_valid() && !(req.isFromPrefetch)
  }

  def prefetch_late_en(new_req: MissReqWoStoreData, new_req_valid: Bool): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    new_req_valid && alloc && block_match && (req.isFromPrefetch) && !(new_req.isFromPrefetch)
  }

  def reject_req(new_req: MissReq): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)
    val merge_load = (req.isFromLoad || req.isFromStore || req.isFromPrefetch) && new_req.isFromLoad
    // merging a store into a store is disabled; sbuffer should avoid this situation, as
    // stores to the same address must preserve their program order to match the memory model
    val merge_store = (req.isFromLoad || req.isFromPrefetch) && new_req.isFromStore

    val set_match = addr_to_dcache_set(req.vaddr) === addr_to_dcache_set(new_req.vaddr)

    Mux(
        alloc,
        block_match && (!alias_match || !(merge_load || merge_store)),
        false.B
      )
  }

  def merge_req(new_req: MissReq): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)
    val merge_load = (req.isFromLoad || req.isFromStore || req.isFromPrefetch) && new_req.isFromLoad
    // merging a store into a store is disabled; sbuffer should avoid this situation, as
    // stores to the same address must preserve their program order to match the memory model
    val merge_store = (req.isFromLoad || req.isFromPrefetch) && new_req.isFromStore
    Mux(
        alloc,
        block_match && alias_match && (merge_load || merge_store),
        false.B
      )
  }

  def merge_isKeyword(new_req: MissReq): Bool = {
    val load_merge_load  = merge_req(new_req) && req.isFromLoad  && new_req.isFromLoad
    val store_merge_load = merge_req(new_req) && req.isFromStore && new_req.isFromLoad
    val load_merge_load_use_new_req_isKeyword = isAfter(req.lqIdx, new_req.lqIdx)
    val use_new_req_isKeyword = (load_merge_load && load_merge_load_use_new_req_isKeyword) || store_merge_load
    Mux(
      use_new_req_isKeyword,
      new_req.vaddr(5).asBool,
      req.vaddr(5).asBool
    )
  }
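
  // Note: isKeyword implements critical-word-first refill. vaddr(5) selects which half of
  // the 64B cacheline the missing load needs first, and it is echoed to L2 via IsKeywordKey
  // so that the first grant beat carries that half. When two loads are merged, the older
  // load in lqIdx order decides the keyword, as it is presumably the more urgent one.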

  def isKeyword(): Bool = {
    val alloc_isKeyword = Mux(
      alloc,
      Mux(
        req.isFromLoad,
        req.vaddr(5).asBool,
        false.B),
      false.B)
    Mux(
      merge_req(req),
      merge_isKeyword(req),
      alloc_isKeyword
    )
  }
  // send out acquire as soon as possible
  // if a new store miss req is about to merge into this pipe reg, don't send acquire now
  def can_send_acquire(valid: Bool, new_req: MissReq): Bool = {
    alloc && !(valid && merge_req(new_req) && new_req.isFromStore)
  }

  def get_acquire(l2_pf_store_only: Bool): TLBundleA = {
    val acquire = Wire(new TLBundleA(edge.bundle))
    val grow_param = req.req_coh.onAccess(req.cmd)._2
    val acquireBlock = edge.AcquireBlock(
      fromSource = mshr_id,
      toAddress = get_block_addr(req.addr),
      lgSize = (log2Up(cfg.blockBytes)).U,
      growPermissions = grow_param
    )._2
    val acquirePerm = edge.AcquirePerm(
      fromSource = mshr_id,
      toAddress = get_block_addr(req.addr),
      lgSize = (log2Up(cfg.blockBytes)).U,
      growPermissions = grow_param
    )._2
    acquire := Mux(req.full_overwrite, acquirePerm, acquireBlock)
    // resolve cache alias by L2
    acquire.user.lift(AliasKey).foreach(_ := req.vaddr(13, 12))
    // pass vaddr to l2
    acquire.user.lift(VaddrKey).foreach(_ := req.vaddr(VAddrBits - 1, blockOffBits))

    // the miss req pipe reg passes the keyword to L2; this path takes priority
    acquire.echo.lift(IsKeywordKey).foreach(_ := isKeyword())

    // trigger prefetch
    acquire.user.lift(PrefetchKey).foreach(_ := Mux(l2_pf_store_only, req.isFromStore, true.B))
    // req source
    when(req.isFromLoad) {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPULoadData.id.U)
    }.elsewhen(req.isFromStore) {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUStoreData.id.U)
    }.elsewhen(req.isFromAMO) {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUAtomicData.id.U)
    }.otherwise {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.L1DataPrefetch.id.U)
    }

    acquire
  }

  def block_match(release_addr: UInt): Bool = {
    reg_valid() && get_block(req.addr) === get_block(release_addr)
  }
}
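
// Note: on the AcquirePerm/AcquireBlock choice above — when a store fully overwrites the
// cacheline (full_overwrite), only write permission is required and the data transfer on
// channel D can be elided, so AcquirePerm is issued; otherwise AcquireBlock fetches the data.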

class CMOUnit(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule {
  val io = IO(new Bundle() {
    val req = Flipped(DecoupledIO(new CMOReq))
    val req_chanA = DecoupledIO(new TLBundleA(edge.bundle))
    val resp_chanD = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val resp_to_lsq = DecoupledIO(new CMOResp)
  })

  val s_idle :: s_sreq :: s_wresp :: s_lsq_resp :: Nil = Enum(4)
  val state = RegInit(s_idle)
  val state_next = WireInit(state)
  val req = RegEnable(io.req.bits, io.req.fire)

  state := state_next

  switch (state) {
    is(s_idle) {
      when (io.req.fire) {
        state_next := s_sreq
      }
    }
    is(s_sreq) {
      when (io.req_chanA.fire) {
        state_next := s_wresp
      }
    }
    is(s_wresp) {
      when (io.resp_chanD.fire) {
        state_next := s_lsq_resp
      }
    }
    is(s_lsq_resp) {
      when (io.resp_to_lsq.fire) {
        state_next := s_idle
      }
    }
  }

  io.req.ready := state === s_idle

  io.req_chanA.valid := state === s_sreq
  io.req_chanA.bits := edge.CacheBlockOperation(
    fromSource = (cfg.nMissEntries + 1).U,
    toAddress = req.address,
    lgSize = (log2Up(cfg.blockBytes)).U,
    opcode = req.opcode
  )._2

  io.resp_chanD.ready := state === s_wresp

  io.resp_to_lsq.valid := state === s_lsq_resp
  io.resp_to_lsq.bits.address := req.address

  assert(!(state =/= s_idle && io.req.valid))
  assert(!(state =/= s_wresp && io.resp_chanD.valid))
}
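
// Note: the CMO unit above is a one-request-at-a-time 4-state FSM
// (idle -> send chanA req -> wait chanD resp -> resp to LSQ). Its TileLink source id
// (cfg.nMissEntries + 1) lies outside the MSHR id range, so CBOAck responses on channel D
// can be routed back here instead of to a MissEntry.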

class MissEntry(edge: TLEdgeOut, reqNum: Int)(implicit p: Parameters) extends DCacheModule
  with HasCircularQueuePtrHelper
 {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(hartIdLen.W))
    // MSHR ID
    val id = Input(UInt(log2Up(cfg.nMissEntries).W))
    // client requests
    // MSHR update request, MSHR state and addr will be updated when req.fire
    val req = Flipped(ValidIO(new MissReqWoStoreData))
    val wbq_block_miss_req = Input(Bool())
    // pipeline reg
    val miss_req_pipe_reg = Input(new MissReqPipeRegBundle(edge))
    // allocate this entry for a new req
    val primary_valid = Input(Bool())
    // this entry is free and can be allocated to new reqs
    val primary_ready = Output(Bool())
    // this entry is busy, but it can merge the new req
    val secondary_ready = Output(Bool())
    // this entry is busy and it can not merge the new req
    val secondary_reject = Output(Bool())
    // way selected for replacing, used to support plru update
    // bus
    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    val queryME = Vec(reqNum, Flipped(new DCacheMEQueryIOBundle))

    // send refill info to load queue, currently unused
    val refill_to_ldq = ValidIO(new Refill)

    // replace pipe
    val l2_hint = Input(Valid(new L2ToL1Hint())) // Hint from L2 Cache

    // main pipe: amo miss
    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Input(Bool())
    val main_pipe_refill_resp = Input(Bool())
    val main_pipe_replay = Input(Bool())

    // for main pipe s2
    val refill_info = ValidIO(new MissQueueRefillInfo)

    val block_addr = ValidIO(UInt(PAddrBits.W))

    val req_addr = ValidIO(UInt(PAddrBits.W))

    val req_handled_by_this_entry = Output(Bool())

    val forwardInfo = Output(new MissEntryForwardIO)
    val l2_pf_store_only = Input(Bool())

    // whether the pipeline reg has sent out an acquire
    val acquire_fired_by_pipe_reg = Input(Bool())
    val memSetPattenDetected = Input(Bool())

    val perf_pending_prefetch = Output(Bool())
    val perf_pending_normal   = Output(Bool())

    val rob_head_query = new DCacheBundle {
      val vaddr = Input(UInt(VAddrBits.W))
      val query_valid = Input(Bool())

      val resp = Output(Bool())

      def hit(e_vaddr: UInt): Bool = {
        require(e_vaddr.getWidth == VAddrBits)
        query_valid && vaddr(VAddrBits - 1, DCacheLineOffset) === e_vaddr(VAddrBits - 1, DCacheLineOffset)
      }
    }

    val latency_monitor = new DCacheBundle {
      val load_miss_refilling  = Output(Bool())
      val store_miss_refilling = Output(Bool())
      val amo_miss_refilling   = Output(Bool())
      val pf_miss_refilling    = Output(Bool())
    }

    val prefetch_info = new DCacheBundle {
      val late_prefetch = Output(Bool())
    }
    val nMaxPrefetchEntry = Input(UInt(64.W))
    val matched = Output(Bool())
  })

  assert(!RegNext(io.primary_valid && !io.primary_ready))

  val req = Reg(new MissReqWoStoreData)
  val req_primary_fire = Reg(new MissReqWoStoreData) // for perf use
  val req_store_mask = Reg(UInt(cfg.blockBytes.W))
  val req_valid = RegInit(false.B)
  val set = addr_to_dcache_set(req.vaddr)
  // initial keyword
  val isKeyword = RegInit(false.B)

  val miss_req_pipe_reg_bits = io.miss_req_pipe_reg.req

  val input_req_is_prefetch = isPrefetch(miss_req_pipe_reg_bits.cmd)

  val s_acquire = RegInit(true.B)
  val s_grantack = RegInit(true.B)
  val s_mainpipe_req = RegInit(true.B)

  val w_grantfirst = RegInit(true.B)
  val w_grantlast = RegInit(true.B)
  val w_mainpipe_resp = RegInit(true.B)
  val w_refill_resp = RegInit(true.B)
  val w_l2hint = RegInit(true.B)

  val mainpipe_req_fired = RegInit(true.B)

  val release_entry = s_grantack && w_mainpipe_resp && w_refill_resp

  val acquire_not_sent = !s_acquire && !io.mem_acquire.ready
  val data_not_refilled = !w_grantfirst

  val error = RegInit(false.B)
  val prefetch = RegInit(false.B)
  val access = RegInit(false.B)

  val should_refill_data_reg = Reg(Bool())
  val should_refill_data = WireInit(should_refill_data_reg)

  val should_replace = RegInit(false.B)

  val full_overwrite = Reg(Bool())

  val (_, _, refill_done, refill_count) = edge.count(io.mem_grant)
  val grant_param = Reg(UInt(TLPermissions.bdWidth.W))

  // refill data with store data; this reg is used to store:
  // 1. store data (if needed), before l2 refills the data
  // 2. the merged result of store data and l2 refill data (i.e. the new cacheline that will be written to the data array)
  val refill_and_store_data = Reg(Vec(blockRows, UInt(rowBits.W)))
  // raw data refilled to l1 by l2
  val refill_data_raw = Reg(Vec(blockBytes/beatBytes, UInt(beatBits.W)))

  // allocate current miss queue entry for a miss req
  val primary_fire = WireInit(io.req.valid && io.primary_ready && io.primary_valid && !io.req.bits.cancel && !io.wbq_block_miss_req)
  val primary_accept = WireInit(io.req.valid && io.primary_ready && io.primary_valid && !io.req.bits.cancel)
  // merge miss req to current miss queue entry
  val secondary_fire = WireInit(io.req.valid && io.secondary_ready && !io.req.bits.cancel && !io.wbq_block_miss_req)
  val secondary_accept = WireInit(io.req.valid && io.secondary_ready && !io.req.bits.cancel)

  val req_handled_by_this_entry = primary_accept || secondary_accept

  // for perf use
  val secondary_fired = RegInit(false.B)

  io.perf_pending_prefetch := req_valid && prefetch && !secondary_fired
  io.perf_pending_normal   := req_valid && (!prefetch || secondary_fired)

  io.rob_head_query.resp   := io.rob_head_query.hit(req.vaddr) && req_valid

  io.req_handled_by_this_entry := req_handled_by_this_entry

  when (release_entry && req_valid) {
    req_valid := false.B
  }

  when (io.miss_req_pipe_reg.alloc && !io.miss_req_pipe_reg.cancel) {
    assert(RegNext(primary_fire), "after 1 cycle of primary_fire, entry will be allocated")
    req_valid := true.B

    req := miss_req_pipe_reg_bits.toMissReqWoStoreData()
    req_primary_fire := miss_req_pipe_reg_bits.toMissReqWoStoreData()
    req.addr := get_block_addr(miss_req_pipe_reg_bits.addr)
    // only load misses need the keyword
    isKeyword := Mux(miss_req_pipe_reg_bits.isFromLoad, miss_req_pipe_reg_bits.vaddr(5).asBool, false.B)

    s_acquire := io.acquire_fired_by_pipe_reg
    s_grantack := false.B
    s_mainpipe_req := false.B

    w_grantfirst := false.B
    w_grantlast := false.B
    w_l2hint := false.B
    mainpipe_req_fired := false.B

    when(miss_req_pipe_reg_bits.isFromStore) {
      req_store_mask := miss_req_pipe_reg_bits.store_mask
      for (i <- 0 until blockRows) {
        refill_and_store_data(i) := miss_req_pipe_reg_bits.store_data(rowBits * (i + 1) - 1, rowBits * i)
      }
    }
    full_overwrite := miss_req_pipe_reg_bits.isFromStore && miss_req_pipe_reg_bits.full_overwrite

    when (!miss_req_pipe_reg_bits.isFromAMO) {
      w_refill_resp := false.B
    }

    when (miss_req_pipe_reg_bits.isFromAMO) {
      w_mainpipe_resp := false.B
    }

    should_refill_data_reg := miss_req_pipe_reg_bits.isFromLoad
    error := false.B
    prefetch := input_req_is_prefetch && !io.miss_req_pipe_reg.prefetch_late_en(io.req.bits, io.req.valid)
    access := false.B
    secondary_fired := false.B
  }
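
  // Note: s_acquire is initialized from acquire_fired_by_pipe_reg because the enq pipeline
  // reg may already have sent the acquire for this miss; in that case this entry must not
  // issue a second acquire for the same block. Also note the completion path split: AMO
  // misses finish via the main pipe (w_mainpipe_resp), all others via the refill response
  // (w_refill_resp).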

  when (io.miss_req_pipe_reg.merge && !io.miss_req_pipe_reg.cancel) {
    assert(RegNext(secondary_fire) || RegNext(RegNext(primary_fire)), "after 1 cycle of secondary_fire or 2 cycles of primary_fire, entry will be merged")
    assert(miss_req_pipe_reg_bits.req_coh.state <= req.req_coh.state || (prefetch && !access))
    assert(!(miss_req_pipe_reg_bits.isFromAMO || req.isFromAMO))
    // use the most up-to-date meta
    req.req_coh := miss_req_pipe_reg_bits.req_coh

    isKeyword := Mux(
      before_req_sent_can_merge(miss_req_pipe_reg_bits),
      before_req_sent_merge_iskeyword(miss_req_pipe_reg_bits),
      isKeyword)
    assert(!miss_req_pipe_reg_bits.isFromPrefetch, "can not merge a prefetch req, late prefetch should always be ignored!")

    when (miss_req_pipe_reg_bits.isFromStore) {
      req := miss_req_pipe_reg_bits
      req.addr := get_block_addr(miss_req_pipe_reg_bits.addr)
      req_store_mask := miss_req_pipe_reg_bits.store_mask
      for (i <- 0 until blockRows) {
        refill_and_store_data(i) := miss_req_pipe_reg_bits.store_data(rowBits * (i + 1) - 1, rowBits * i)
      }
      full_overwrite := miss_req_pipe_reg_bits.isFromStore && miss_req_pipe_reg_bits.full_overwrite
      assert(is_alias_match(req.vaddr, miss_req_pipe_reg_bits.vaddr), "alias bits should be the same when merging store")
    }

    should_refill_data := should_refill_data_reg || miss_req_pipe_reg_bits.isFromLoad
    should_refill_data_reg := should_refill_data
    when (!input_req_is_prefetch) {
      access := true.B // when merging a non-prefetch req, set the access bit
    }
    secondary_fired := true.B
  }

  when (io.mem_acquire.fire) {
    s_acquire := true.B
  }

  // merge data refilled by l2 and store data, update miss queue entry, gen refill_req
  val new_data = Wire(Vec(blockRows, UInt(rowBits.W)))
  val new_mask = Wire(Vec(blockRows, UInt(rowBytes.W)))
  // merge refilled data and store data (if needed)
  def mergePutData(old_data: UInt, new_data: UInt, wmask: UInt): UInt = {
    val full_wmask = FillInterleaved(8, wmask)
    (~full_wmask & old_data | full_wmask & new_data)
  }
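  // Note: mergePutData widens the per-byte wmask to a per-bit mask and picks store bytes
  // over refilled bytes. A tiny worked example with a single byte (not the real row width):
  //   old_data = 0xAB, new_data = 0xCD, wmask = 0b1 -> full_wmask = 0xFF -> result = 0xCD
  //   old_data = 0xAB, new_data = 0xCD, wmask = 0b0 -> full_wmask = 0x00 -> result = 0xAB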
  for (i <- 0 until blockRows) {
    // new_data(i) := req.store_data(rowBits * (i + 1) - 1, rowBits * i)
    new_data(i) := refill_and_store_data(i)
    // we only need to merge data for Store
    new_mask(i) := Mux(req.isFromStore, req_store_mask(rowBytes * (i + 1) - 1, rowBytes * i), 0.U)
  }

  val hasData = RegInit(true.B)
  val isDirty = RegInit(false.B)
  when (io.mem_grant.fire) {
    w_grantfirst := true.B
    grant_param := io.mem_grant.bits.param
    when (edge.hasData(io.mem_grant.bits)) {
      // GrantData
      when (isKeyword) {
        for (i <- 0 until beatRows) {
          val idx = ((refill_count << log2Floor(beatRows)) + i.U) ^ 4.U
          val grant_row = io.mem_grant.bits.data(rowBits * (i + 1) - 1, rowBits * i)
          refill_and_store_data(idx) := mergePutData(grant_row, new_data(idx), new_mask(idx))
        }
      }.otherwise {
        for (i <- 0 until beatRows) {
          val idx = (refill_count << log2Floor(beatRows)) + i.U
          val grant_row = io.mem_grant.bits.data(rowBits * (i + 1) - 1, rowBits * i)
          refill_and_store_data(idx) := mergePutData(grant_row, new_data(idx), new_mask(idx))
        }
      }
      w_grantlast := w_grantlast || refill_done
      hasData := true.B
    }.otherwise {
      // Grant
      assert(full_overwrite)
      for (i <- 0 until blockRows) {
        refill_and_store_data(i) := new_data(i)
      }
      w_grantlast := true.B
      hasData := false.B
    }

    error := io.mem_grant.bits.denied || io.mem_grant.bits.corrupt || error

    refill_data_raw(refill_count ^ isKeyword) := io.mem_grant.bits.data
    isDirty := io.mem_grant.bits.echo.lift(DirtyKey).getOrElse(false.B)
  }
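
  // Note: with isKeyword set, L2 sends the critical half of the line in the first beat, so
  // the row index is XORed with 4 (half of blockRows in the 64B/2-beat configuration assumed
  // here) and the raw-beat index with isKeyword, undoing the reordering so that rows land at
  // their natural positions in refill_and_store_data / refill_data_raw.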

  when (io.mem_finish.fire) {
    s_grantack := true.B
  }

  when (io.main_pipe_req.fire) {
    s_mainpipe_req := true.B
    mainpipe_req_fired := true.B
  }

  when (io.main_pipe_replay) {
    s_mainpipe_req := false.B
  }

  when (io.main_pipe_resp) {
    w_mainpipe_resp := true.B
  }

  when(io.main_pipe_refill_resp) {
    w_refill_resp := true.B
  }

  when (io.l2_hint.valid) {
    w_l2hint := true.B
  }

  def before_req_sent_can_merge(new_req: MissReqWoStoreData): Bool = {
    // acquire_not_sent && (new_req.isFromLoad || new_req.isFromStore)

    // Since most acquire requests have already been issued from the pipe reg,
    // the number of such merge situations is currently small,
    // so don't merge anything here, for better timing.
    false.B
  }

  def before_data_refill_can_merge(new_req: MissReqWoStoreData): Bool = {
    data_not_refilled && new_req.isFromLoad
  }

  // Note that late prefetch will be ignored

  def should_merge(new_req: MissReqWoStoreData): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)
    block_match && alias_match &&
    (
      before_req_sent_can_merge(new_req) ||
      before_data_refill_can_merge(new_req)
    )
  }

  def before_req_sent_merge_iskeyword(new_req: MissReqWoStoreData): Bool = {
    val need_check_isKeyword = acquire_not_sent && req.isFromLoad && new_req.isFromLoad && should_merge(new_req)
    val use_new_req_isKeyword = isAfter(req.lqIdx, new_req.lqIdx)
    Mux(
      need_check_isKeyword,
      Mux(
        use_new_req_isKeyword,
        new_req.vaddr(5).asBool,
        req.vaddr(5).asBool
      ),
      isKeyword
    )
  }

  // store can be merged before io.mem_acquire.fire
  // store can not be merged in the cycle that io.mem_acquire fires
  // load can be merged before io.mem_grant.fire
  //
  // TODO: merge store if possible? mem_acquire may need to be re-issued,
  // but the sbuffer entry can be freed
  def should_reject(new_req: MissReqWoStoreData): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val set_match = set === addr_to_dcache_set(new_req.vaddr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)

    req_valid && Mux(
        block_match,
        (!before_req_sent_can_merge(new_req) && !before_data_refill_can_merge(new_req)) || !alias_match,
        false.B
      )
  }

  // req_valid will be updated 1 cycle after primary_fire, so next cycle, this entry cannot accept a new req
  when(GatedValidRegNext(io.id >= ((cfg.nMissEntries).U - io.nMaxPrefetchEntry))) {
    // can accept prefetch req
    io.primary_ready := !req_valid && !GatedValidRegNext(primary_fire)
  }.otherwise {
    // cannot accept prefetch req except when a memset pattern is detected
    io.primary_ready := !req_valid && (!io.req.bits.isFromPrefetch || io.memSetPattenDetected) && !GatedValidRegNext(primary_fire)
  }
  io.secondary_ready := should_merge(io.req.bits)
  io.secondary_reject := should_reject(io.req.bits)

  // generate primary_ready & secondary_(ready | reject) for each miss request
  for (i <- 0 until reqNum) {
    when(GatedValidRegNext(io.id >= ((cfg.nMissEntries).U - io.nMaxPrefetchEntry))) {
      io.queryME(i).primary_ready := !req_valid && !GatedValidRegNext(primary_fire)
    }.otherwise {
      io.queryME(i).primary_ready := !req_valid && !GatedValidRegNext(primary_fire) &&
                                    (!io.queryME(i).req.bits.isFromPrefetch || io.memSetPattenDetected)
    }
    io.queryME(i).secondary_ready  := should_merge(io.queryME(i).req.bits)
    io.queryME(i).secondary_reject := should_reject(io.queryME(i).req.bits)
  }

  // should not allocate, merge or reject at the same time
  assert(RegNext(PopCount(Seq(io.primary_ready, io.secondary_ready, io.secondary_reject)) <= 1.U || !io.req.valid))
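
  // Note: entries are partitioned by id — only the top io.nMaxPrefetchEntry entries
  // (id >= nMissEntries - nMaxPrefetchEntry) may be allocated to prefetch misses, so demand
  // misses cannot be starved by prefetch traffic. The exception is a detected memset
  // pattern, which temporarily lets prefetches allocate in any entry.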

  val refill_data_splited = WireInit(VecInit(Seq.tabulate(cfg.blockBytes * 8 / l1BusDataWidth)(i => {
    val data = refill_and_store_data.asUInt
    data((i + 1) * l1BusDataWidth - 1, i * l1BusDataWidth)
  })))
  // when granted data is all ready, wake up lq's missed load
  val refill_to_ldq_en = !w_grantlast && io.mem_grant.fire
  io.refill_to_ldq.valid := GatedValidRegNext(refill_to_ldq_en)
  io.refill_to_ldq.bits.addr := RegEnable(req.addr + ((refill_count ^ isKeyword) << refillOffBits), refill_to_ldq_en)
  io.refill_to_ldq.bits.data := refill_data_splited(RegEnable(refill_count ^ isKeyword, refill_to_ldq_en))
  io.refill_to_ldq.bits.error := RegEnable(io.mem_grant.bits.corrupt || io.mem_grant.bits.denied, refill_to_ldq_en)
  io.refill_to_ldq.bits.refill_done := RegEnable(refill_done && io.mem_grant.fire, refill_to_ldq_en)
  io.refill_to_ldq.bits.hasdata := hasData
  io.refill_to_ldq.bits.data_raw := refill_data_raw.asUInt
  io.refill_to_ldq.bits.id := io.id

  // if the entry has a pending merge req, wait for it
  // Note: currently we only wait for stores, because a store may acquire T
  io.mem_acquire.valid := !s_acquire && !(io.miss_req_pipe_reg.merge && !io.miss_req_pipe_reg.cancel && miss_req_pipe_reg_bits.isFromStore)
  val grow_param = req.req_coh.onAccess(req.cmd)._2
  val acquireBlock = edge.AcquireBlock(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  val acquirePerm = edge.AcquirePerm(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  io.mem_acquire.bits := Mux(full_overwrite, acquirePerm, acquireBlock)
  // resolve cache alias by L2
  io.mem_acquire.bits.user.lift(AliasKey).foreach( _ := req.vaddr(13, 12))
  // pass vaddr to l2
  io.mem_acquire.bits.user.lift(VaddrKey).foreach( _ := req.vaddr(VAddrBits-1, blockOffBits))
  // pass keyword to L2
  io.mem_acquire.bits.echo.lift(IsKeywordKey).foreach(_ := isKeyword)
  // trigger prefetch
  io.mem_acquire.bits.user.lift(PrefetchKey).foreach(_ := Mux(io.l2_pf_store_only, req.isFromStore, true.B))
  // req source
  when(prefetch && !secondary_fired) {
    io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.L1DataPrefetch.id.U)
  }.otherwise {
    when(req.isFromStore) {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUStoreData.id.U)
    }.elsewhen(req.isFromLoad) {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPULoadData.id.U)
    }.elsewhen(req.isFromAMO) {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUAtomicData.id.U)
    }.otherwise {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.L1DataPrefetch.id.U)
    }
  }
  require(nSets <= 256)

  // io.mem_grant.ready := !w_grantlast && s_acquire
  io.mem_grant.ready := true.B
  assert(!(io.mem_grant.valid && !(!w_grantlast && s_acquire)), "dcache should always be ready for mem_grant now")

  val grantack = RegEnable(edge.GrantAck(io.mem_grant.bits), io.mem_grant.fire)
  assert(RegNext(!io.mem_grant.fire || edge.isRequest(io.mem_grant.bits)))
  io.mem_finish.valid := !s_grantack && w_grantfirst
  io.mem_finish.bits := grantack

  // Send main_pipe_req when a hint is received from L2, or when data arrives without a hint
  io.main_pipe_req.valid := !s_mainpipe_req && (w_l2hint || w_grantlast)
  io.main_pipe_req.bits := DontCare
  io.main_pipe_req.bits.miss := true.B
  io.main_pipe_req.bits.miss_id := io.id
  io.main_pipe_req.bits.probe := false.B
  io.main_pipe_req.bits.source := req.source
  io.main_pipe_req.bits.cmd := req.cmd
  io.main_pipe_req.bits.vaddr := req.vaddr
  io.main_pipe_req.bits.addr := req.addr
  io.main_pipe_req.bits.word_idx := req.word_idx
  io.main_pipe_req.bits.amo_data := req.amo_data
  io.main_pipe_req.bits.amo_mask := req.amo_mask
  io.main_pipe_req.bits.id := req.id
  io.main_pipe_req.bits.pf_source := req.pf_source
  io.main_pipe_req.bits.access := access

  io.block_addr.valid := req_valid && w_grantlast
  io.block_addr.bits := req.addr

  io.req_addr.valid := req_valid
  io.req_addr.bits := req.addr

  io.refill_info.valid := req_valid && w_grantlast
  io.refill_info.bits.store_data := refill_and_store_data.asUInt
  io.refill_info.bits.store_mask := ~0.U(blockBytes.W)
  io.refill_info.bits.miss_param := grant_param
  io.refill_info.bits.miss_dirty := isDirty
  io.refill_info.bits.error      := error

  XSPerfAccumulate("miss_refill_mainpipe_req", io.main_pipe_req.fire)
  XSPerfAccumulate("miss_refill_without_hint", io.main_pipe_req.fire && !mainpipe_req_fired && !w_l2hint)
  XSPerfAccumulate("miss_refill_replay", io.main_pipe_replay)

  val w_grantfirst_forward_info = Mux(isKeyword, w_grantlast, w_grantfirst)
  val w_grantlast_forward_info = Mux(isKeyword, w_grantfirst, w_grantlast)
  io.forwardInfo.apply(req_valid, req.addr, refill_and_store_data, w_grantfirst_forward_info, w_grantlast_forward_info)
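
  // Note: under critical-word-first (isKeyword), the beat holding the first half of the
  // line arrives second, so the grant-progress flags are swapped before being handed to the
  // MSHR forwarding logic, which appears to index refill_and_store_data in natural order.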

  io.matched := req_valid && (get_block(req.addr) === get_block(io.req.bits.addr)) && !prefetch
  io.prefetch_info.late_prefetch := io.req.valid && !(io.req.bits.isFromPrefetch) && req_valid && (get_block(req.addr) === get_block(io.req.bits.addr)) && prefetch

  when(io.prefetch_info.late_prefetch) {
    prefetch := false.B
  }

  // refill latency monitor
  val start_counting = GatedValidRegNext(io.mem_acquire.fire) || (GatedValidRegNextN(primary_fire, 2) && s_acquire)
  io.latency_monitor.load_miss_refilling  := req_valid && req_primary_fire.isFromLoad     && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)
  io.latency_monitor.store_miss_refilling := req_valid && req_primary_fire.isFromStore    && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)
  io.latency_monitor.amo_miss_refilling   := req_valid && req_primary_fire.isFromAMO      && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)
  io.latency_monitor.pf_miss_refilling    := req_valid && req_primary_fire.isFromPrefetch && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)

  XSPerfAccumulate("miss_req_primary", primary_fire)
  XSPerfAccumulate("miss_req_merged", secondary_fire)
  XSPerfAccumulate("load_miss_penalty_to_use",
    should_refill_data &&
      BoolStopWatch(primary_fire, io.refill_to_ldq.valid, true)
  )
  XSPerfAccumulate("penalty_between_grantlast_and_release",
    BoolStopWatch(!RegNext(w_grantlast) && w_grantlast, release_entry, true)
  )
  XSPerfAccumulate("main_pipe_penalty", BoolStopWatch(io.main_pipe_req.fire, io.main_pipe_resp))
  XSPerfAccumulate("penalty_blocked_by_channel_A", io.mem_acquire.valid && !io.mem_acquire.ready)
  XSPerfAccumulate("penalty_waiting_for_channel_D", s_acquire && !w_grantlast && !io.mem_grant.valid)
  XSPerfAccumulate("penalty_waiting_for_channel_E", io.mem_finish.valid && !io.mem_finish.ready)
  XSPerfAccumulate("prefetch_req_primary", primary_fire && io.req.bits.source === DCACHE_PREFETCH_SOURCE.U)
  XSPerfAccumulate("prefetch_req_merged", secondary_fire && io.req.bits.source === DCACHE_PREFETCH_SOURCE.U)
  XSPerfAccumulate("can_not_send_acquire_because_of_merging_store", !s_acquire && io.miss_req_pipe_reg.merge && io.miss_req_pipe_reg.cancel && miss_req_pipe_reg_bits.isFromStore)

  val (mshr_penalty_sample, mshr_penalty) = TransactionLatencyCounter(GatedValidRegNextN(primary_fire, 2), release_entry)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 20, 100, 10, true, false)

  val load_miss_begin = primary_fire && io.req.bits.isFromLoad
  val refill_finished = GatedValidRegNext(!w_grantlast && refill_done) && should_refill_data
  val (load_miss_penalty_sample, load_miss_penalty) = TransactionLatencyCounter(load_miss_begin, refill_finished) // not the real refill finish time
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 20, 100, 10, true, false)

  val (a_to_d_penalty_sample, a_to_d_penalty) = TransactionLatencyCounter(start_counting, GatedValidRegNext(io.mem_grant.fire && refill_done))
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 20, 100, 10, true, false)
}

class MissQueue(edge: TLEdgeOut, reqNum: Int)(implicit p: Parameters) extends DCacheModule
  with HasPerfEvents
  {
  val io = IO(new Bundle {
    val hartId = Input(UInt(hartIdLen.W))
    val req = Flipped(DecoupledIO(new MissReq))
    val resp = Output(new MissResp)
    val refill_to_ldq = ValidIO(new Refill)

    // cmo req
    val cmo_req = Flipped(DecoupledIO(new CMOReq))
    val cmo_resp = DecoupledIO(new CMOResp)

    val queryMQ = Vec(reqNum, Flipped(new DCacheMQQueryIOBundle))

    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    val l2_hint = Input(Valid(new L2ToL1Hint())) // Hint from L2 Cache

    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Flipped(ValidIO(new MainPipeResp))

    val mainpipe_info = Input(new MainPipeInfoToMQ)
    val refill_info = ValidIO(new MissQueueRefillInfo)

    // block probe
    val probe_addr = Input(UInt(PAddrBits.W))
    val probe_block = Output(Bool())

    // block replace when releasing an addr that is valid in an mshr
    val replace_addr = Flipped(ValidIO(UInt(PAddrBits.W)))
    val replace_block = Output(Bool())

    // req blocked by wbq
    val wbq_block_miss_req = Input(Bool())

    val full = Output(Bool())

    // forward missqueue
    val forward = Vec(LoadPipelineWidth, new LduToMissqueueForwardIO)
    val l2_pf_store_only = Input(Bool())

    val memSetPattenDetected = Output(Bool())
    val lqEmpty = Input(Bool())

    val prefetch_info = new Bundle {
      val naive = new Bundle {
        val late_miss_prefetch = Output(Bool())
      }

      val fdp = new Bundle {
        val late_miss_prefetch = Output(Bool())
        val prefetch_monitor_cnt = Output(Bool())
        val total_prefetch = Output(Bool())
      }
    }

    val mq_enq_cancel = Output(Bool())

    val debugTopDown = new DCacheTopDownIO
  })

  // 128KBL1: FIXME: provide vaddr for l2

  val entries = Seq.fill(cfg.nMissEntries)(Module(new MissEntry(edge, reqNum)))
  val cmo_unit = Module(new CMOUnit(edge))

  val miss_req_pipe_reg = RegInit(0.U.asTypeOf(new MissReqPipeRegBundle(edge)))
  val acquire_from_pipereg = Wire(chiselTypeOf(io.mem_acquire))

  val primary_ready_vec = entries.map(_.io.primary_ready)
  val secondary_ready_vec = entries.map(_.io.secondary_ready)
  val secondary_reject_vec = entries.map(_.io.secondary_reject)
  val probe_block_vec = entries.map { case e => e.io.block_addr.valid && e.io.block_addr.bits === io.probe_addr }

  val merge = ParallelORR(Cat(secondary_ready_vec ++ Seq(miss_req_pipe_reg.merge_req(io.req.bits))))
  val reject = ParallelORR(Cat(secondary_reject_vec ++ Seq(miss_req_pipe_reg.reject_req(io.req.bits))))
  val alloc = !reject && !merge && ParallelORR(Cat(primary_ready_vec))
  val accept = alloc || merge
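
  // Note: the enq decision has a fixed precedence — a reject from any entry (or the pipe
  // reg) wins over everything, otherwise a merge wins over allocation, and a new entry is
  // allocated only when some entry is primary_ready.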

  // generate req_ready for each miss request for better timing
  for (i <- 0 until reqNum) {
    val _primary_ready_vec = entries.map(_.io.queryME(i).primary_ready)
    val _secondary_ready_vec = entries.map(_.io.queryME(i).secondary_ready)
    val _secondary_reject_vec = entries.map(_.io.queryME(i).secondary_reject)
    val _merge = ParallelORR(Cat(_secondary_ready_vec ++ Seq(miss_req_pipe_reg.merge_req(io.queryMQ(i).req.bits))))
    val _reject = ParallelORR(Cat(_secondary_reject_vec ++ Seq(miss_req_pipe_reg.reject_req(io.queryMQ(i).req.bits))))
    val _alloc = !_reject && !_merge && ParallelORR(Cat(_primary_ready_vec))
    val _accept = _alloc || _merge

    io.queryMQ(i).ready := _accept
  }

  val req_mshr_handled_vec = entries.map(_.io.req_handled_by_this_entry)
  // merged to pipeline reg
  val req_pipeline_reg_handled = miss_req_pipe_reg.merge_req(io.req.bits) && io.req.valid
  assert(PopCount(Seq(req_pipeline_reg_handled, VecInit(req_mshr_handled_vec).asUInt.orR)) <= 1.U, "miss req will either go to mshr or pipeline reg")
  assert(PopCount(req_mshr_handled_vec) <= 1.U, "Only one mshr can handle a req")
  io.resp.id := Mux(!req_pipeline_reg_handled, OHToUInt(req_mshr_handled_vec), miss_req_pipe_reg.mshr_id)
  io.resp.handled := Cat(req_mshr_handled_vec).orR || req_pipeline_reg_handled
  io.resp.merged := merge

  /*  MissQueue enq logic is now split into 2 cycles
   *
   */
  when(io.req.valid){
    miss_req_pipe_reg.req     := io.req.bits
  }
  // miss_req_pipe_reg.req     := io.req.bits
  miss_req_pipe_reg.alloc   := alloc && io.req.valid && !io.req.bits.cancel && !io.wbq_block_miss_req
  miss_req_pipe_reg.merge   := merge && io.req.valid && !io.req.bits.cancel && !io.wbq_block_miss_req
  miss_req_pipe_reg.cancel  := io.wbq_block_miss_req
  miss_req_pipe_reg.mshr_id := io.resp.id

  assert(PopCount(Seq(alloc && io.req.valid, merge && io.req.valid)) <= 1.U, "allocate and merge a mshr in same cycle!")

  val source_except_load_cnt = RegInit(0.U(10.W))
  when(VecInit(req_mshr_handled_vec).asUInt.orR || req_pipeline_reg_handled) {
    when(io.req.bits.isFromLoad) {
      source_except_load_cnt := 0.U
    }.otherwise {
      when(io.req.bits.isFromStore) {
        source_except_load_cnt := source_except_load_cnt + 1.U
      }
    }
  }
  val Threshold = 8
  val memSetPattenDetected = GatedValidRegNext((source_except_load_cnt >= Threshold.U) && io.lqEmpty)

  io.memSetPattenDetected := memSetPattenDetected
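
  // Note: memset-pattern heuristic — count store misses handled without an intervening
  // load miss (a load miss resets the counter). Once the count reaches the threshold (8)
  // while the load queue is empty, the stream is treated as a memset and prefetches are
  // allowed to allocate in every MSHR (see primary_ready in MissEntry).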

  val forwardInfo_vec = VecInit(entries.map(_.io.forwardInfo))
  (0 until LoadPipelineWidth).map(i => {
    val id = io.forward(i).mshrid
    val req_valid = io.forward(i).valid
    val paddr = io.forward(i).paddr

    val (forward_mshr, forwardData) = forwardInfo_vec(id).forward(req_valid, paddr)
    io.forward(i).forward_result_valid := forwardInfo_vec(id).check(req_valid, paddr)
    io.forward(i).forward_mshr := forward_mshr
    io.forward(i).forwardData := forwardData
  })

  assert(RegNext(PopCount(secondary_ready_vec) <= 1.U || !io.req.valid))
//  assert(RegNext(PopCount(secondary_reject_vec) <= 1.U))
  // It is possible that one mshr wants to merge a req, while another mshr wants to reject it.
  // That is, a coming req has the same paddr as that of mshr_0 (merge),
  // while it has the same set and the same way as mshr_1 (reject).
  // In this situation, the coming req should be merged by mshr_0
//  assert(RegNext(PopCount(Seq(merge, reject)) <= 1.U))

  def select_valid_one[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {

    if (name.nonEmpty) { out.suggestName(s"${name.get}_select") }
    out.valid := Cat(in.map(_.valid)).orR
    out.bits := ParallelMux(in.map(_.valid) zip in.map(_.bits))
    in.map(_.ready := out.ready)
    assert(!RegNext(out.valid && PopCount(Cat(in.map(_.valid))) > 1.U))
  }

  io.mem_grant.ready := false.B

  val nMaxPrefetchEntry = Constantin.createRecord(s"nMaxPrefetchEntry${p(XSCoreParamsKey).HartId}", initValue = 14)
  entries.zipWithIndex.foreach {
    case (e, i) =>
      val former_primary_ready = if(i == 0)
        false.B
      else
        Cat((0 until i).map(j => entries(j).io.primary_ready)).orR

      e.io.hartId := io.hartId
      e.io.id := i.U
      e.io.l2_pf_store_only := io.l2_pf_store_only
      e.io.req.valid := io.req.valid
      e.io.wbq_block_miss_req := io.wbq_block_miss_req
      e.io.primary_valid := io.req.valid &&
        !merge &&
        !reject &&
        !former_primary_ready &&
        e.io.primary_ready
      e.io.req.bits := io.req.bits.toMissReqWoStoreData()

      e.io.mem_grant.valid := false.B
      e.io.mem_grant.bits := DontCare
      when (io.mem_grant.bits.source === i.U) {
        e.io.mem_grant <> io.mem_grant
      }

      when(miss_req_pipe_reg.reg_valid() && miss_req_pipe_reg.mshr_id === i.U) {
        e.io.miss_req_pipe_reg := miss_req_pipe_reg
      }.otherwise {
        e.io.miss_req_pipe_reg       := DontCare
        e.io.miss_req_pipe_reg.merge := false.B
        e.io.miss_req_pipe_reg.alloc := false.B
      }

      e.io.acquire_fired_by_pipe_reg := acquire_from_pipereg.fire

      e.io.main_pipe_resp := io.main_pipe_resp.valid && io.main_pipe_resp.bits.ack_miss_queue && io.main_pipe_resp.bits.miss_id === i.U
      e.io.main_pipe_replay := io.mainpipe_info.s2_valid && io.mainpipe_info.s2_replay_to_mq && io.mainpipe_info.s2_miss_id === i.U
      e.io.main_pipe_refill_resp := io.mainpipe_info.s3_valid && io.mainpipe_info.s3_refill_resp && io.mainpipe_info.s3_miss_id === i.U

      e.io.memSetPattenDetected := memSetPattenDetected
      e.io.nMaxPrefetchEntry := nMaxPrefetchEntry

      e.io.main_pipe_req.ready := io.main_pipe_req.ready

      for (j <- 0 until reqNum) {
        e.io.queryME(j).req.valid := io.queryMQ(j).req.valid
        e.io.queryME(j).req.bits  := io.queryMQ(j).req.bits.toMissReqWoStoreData()
      }

      when(io.l2_hint.bits.sourceId === i.U) {
        e.io.l2_hint <> io.l2_hint
      } .otherwise {
        e.io.l2_hint.valid := false.B
        e.io.l2_hint.bits := DontCare
      }
  }
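
  // Note: former_primary_ready turns allocation into a fixed-priority select — a req is
  // allocated to the lowest-indexed free entry, because primary_valid is suppressed for
  // every entry that has a ready predecessor; thus at most one entry sees
  // primary_valid && primary_ready in any cycle.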

  cmo_unit.io.req <> io.cmo_req
  io.cmo_resp <> cmo_unit.io.resp_to_lsq
  when (io.mem_grant.valid && io.mem_grant.bits.opcode === TLMessages.CBOAck) {
    cmo_unit.io.resp_chanD <> io.mem_grant
  } .otherwise {
    cmo_unit.io.resp_chanD.valid := false.B
    cmo_unit.io.resp_chanD.bits := DontCare
  }

  io.req.ready := accept
  io.mq_enq_cancel := io.req.bits.cancel
  io.refill_to_ldq.valid := Cat(entries.map(_.io.refill_to_ldq.valid)).orR
  io.refill_to_ldq.bits := ParallelMux(entries.map(_.io.refill_to_ldq.valid) zip entries.map(_.io.refill_to_ldq.bits))

  io.refill_info.valid := VecInit(entries.zipWithIndex.map{ case(e,i) => e.io.refill_info.valid && io.mainpipe_info.s2_valid && io.mainpipe_info.s2_miss_id === i.U}).asUInt.orR
  io.refill_info.bits := Mux1H(entries.zipWithIndex.map{ case(e,i) => (io.mainpipe_info.s2_miss_id === i.U) -> e.io.refill_info.bits })

  acquire_from_pipereg.valid := miss_req_pipe_reg.can_send_acquire(io.req.valid, io.req.bits)
  acquire_from_pipereg.bits := miss_req_pipe_reg.get_acquire(io.l2_pf_store_only)

  XSPerfAccumulate("acquire_fire_from_pipereg", acquire_from_pipereg.fire)
  XSPerfAccumulate("pipereg_valid", miss_req_pipe_reg.reg_valid())

  val acquire_sources = Seq(cmo_unit.io.req_chanA, acquire_from_pipereg) ++ entries.map(_.io.mem_acquire)
  TLArbiter.lowest(edge, io.mem_acquire, acquire_sources:_*)
  TLArbiter.lowest(edge, io.mem_finish, entries.map(_.io.mem_finish):_*)
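
  // Note: TLArbiter.lowest grants the lowest-indexed valid source, so channel-A priority
  // here is CMO requests first, then the early acquire from the enq pipe reg, then the
  // MSHRs in index order.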

  // amo's main pipe req out
  fastArbiter(entries.map(_.io.main_pipe_req), io.main_pipe_req, Some("main_pipe_req"))

  io.probe_block := Cat(probe_block_vec).orR

  io.replace_block := io.replace_addr.valid && Cat(entries.map(e => e.io.req_addr.valid && e.io.req_addr.bits === io.replace_addr.bits) ++ Seq(miss_req_pipe_reg.block_match(io.replace_addr.bits))).orR

  io.full := ~Cat(entries.map(_.io.primary_ready)).andR

  // prefetch related
  io.prefetch_info.naive.late_miss_prefetch := io.req.valid && io.req.bits.isPrefetchRead && (miss_req_pipe_reg.matched(io.req.bits) || Cat(entries.map(_.io.matched)).orR)

  io.prefetch_info.fdp.late_miss_prefetch := (miss_req_pipe_reg.prefetch_late_en(io.req.bits.toMissReqWoStoreData(), io.req.valid) || Cat(entries.map(_.io.prefetch_info.late_prefetch)).orR)
  io.prefetch_info.fdp.prefetch_monitor_cnt := io.main_pipe_req.fire
  io.prefetch_info.fdp.total_prefetch := alloc && io.req.valid && !io.req.bits.cancel && isFromL1Prefetch(io.req.bits.pf_source)

  // L1MissTrace Chisel DB
  val debug_miss_trace = Wire(new L1MissTrace)
  debug_miss_trace.vaddr := io.req.bits.vaddr
  debug_miss_trace.paddr := io.req.bits.addr
  debug_miss_trace.source := io.req.bits.source
  debug_miss_trace.pc := io.req.bits.pc

  val isWriteL1MissQMissTable = Constantin.createRecord(s"isWriteL1MissQMissTable${p(XSCoreParamsKey).HartId}")
  val table = ChiselDB.createTable(s"L1MissQMissTrace_hart${p(XSCoreParamsKey).HartId}", new L1MissTrace)
  table.log(debug_miss_trace, isWriteL1MissQMissTable.orR && io.req.valid && !io.req.bits.cancel && alloc, "MissQueue", clock, reset)

  // Difftest
  if (env.EnableDifftest) {
    val difftest = DifftestModule(new DiffRefillEvent, dontCare = true)
    difftest.coreid := io.hartId
    difftest.index := 1.U
    difftest.valid := io.refill_to_ldq.valid && io.refill_to_ldq.bits.hasdata && io.refill_to_ldq.bits.refill_done
    difftest.addr := io.refill_to_ldq.bits.addr
    difftest.data := io.refill_to_ldq.bits.data_raw.asTypeOf(difftest.data)
    difftest.idtfr := DontCare
  }

  // Perf count
  XSPerfAccumulate("miss_req", io.req.fire && !io.req.bits.cancel)
  XSPerfAccumulate("miss_req_allocate", io.req.fire && !io.req.bits.cancel && alloc)
  XSPerfAccumulate("miss_req_load_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromLoad)
  XSPerfAccumulate("miss_req_store_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromStore)
  XSPerfAccumulate("miss_req_amo_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromAMO)
  XSPerfAccumulate("miss_req_prefetch_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromPrefetch)
  XSPerfAccumulate("miss_req_merge_load", io.req.fire && !io.req.bits.cancel && merge && io.req.bits.isFromLoad)
  XSPerfAccumulate("miss_req_reject_load", io.req.valid && !io.req.bits.cancel && reject && io.req.bits.isFromLoad)
  XSPerfAccumulate("probe_blocked_by_miss", io.probe_block)
  XSPerfAccumulate("prefetch_primary_fire", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromPrefetch)
  XSPerfAccumulate("prefetch_secondary_fire", io.req.fire && !io.req.bits.cancel && merge && io.req.bits.isFromPrefetch)
  XSPerfAccumulate("memSetPattenDetected", memSetPattenDetected)
  val max_inflight = RegInit(0.U((log2Up(cfg.nMissEntries) + 1).W))
  val num_valids = PopCount(~Cat(primary_ready_vec).asUInt)
  when (num_valids > max_inflight) {
    max_inflight := num_valids
  }
  // max inflight (average) = max_inflight_total / cycle cnt
  XSPerfAccumulate("max_inflight", max_inflight)
  QueuePerf(cfg.nMissEntries, num_valids, num_valids === cfg.nMissEntries.U)
  io.full := num_valids === cfg.nMissEntries.U // overrides the earlier io.full assignment (last connect wins)
  XSPerfHistogram("num_valids", num_valids, true.B, 0, cfg.nMissEntries, 1)

  XSPerfHistogram("L1DMLP_CPUData", PopCount(VecInit(entries.map(_.io.perf_pending_normal)).asUInt), true.B, 0, cfg.nMissEntries, 1)
  XSPerfHistogram("L1DMLP_Prefetch", PopCount(VecInit(entries.map(_.io.perf_pending_prefetch)).asUInt), true.B, 0, cfg.nMissEntries, 1)
  XSPerfHistogram("L1DMLP_Total", num_valids, true.B, 0, cfg.nMissEntries, 1)

  XSPerfAccumulate("miss_load_refill_latency", PopCount(entries.map(_.io.latency_monitor.load_miss_refilling)))
  XSPerfAccumulate("miss_store_refill_latency", PopCount(entries.map(_.io.latency_monitor.store_miss_refilling)))
  XSPerfAccumulate("miss_amo_refill_latency", PopCount(entries.map(_.io.latency_monitor.amo_miss_refilling)))
  XSPerfAccumulate("miss_pf_refill_latency", PopCount(entries.map(_.io.latency_monitor.pf_miss_refilling)))

  val rob_head_miss_in_dcache = VecInit(entries.map(_.io.rob_head_query.resp)).asUInt.orR

  entries.foreach {
    case e => {
      e.io.rob_head_query.query_valid := io.debugTopDown.robHeadVaddr.valid
      e.io.rob_head_query.vaddr := io.debugTopDown.robHeadVaddr.bits
    }
  }

  io.debugTopDown.robHeadMissInDCache := rob_head_miss_in_dcache

  val perfValidCount = RegNext(PopCount(entries.map(entry => (!entry.io.primary_ready))))
  val perfEvents = Seq(
    ("dcache_missq_req      ", io.req.fire),
    ("dcache_missq_1_4_valid", (perfValidCount < (cfg.nMissEntries.U/4.U))),
    ("dcache_missq_2_4_valid", (perfValidCount > (cfg.nMissEntries.U/4.U)) & (perfValidCount <= (cfg.nMissEntries.U/2.U))),
    ("dcache_missq_3_4_valid", (perfValidCount > (cfg.nMissEntries.U/2.U)) & (perfValidCount <= (cfg.nMissEntries.U*3.U/4.U))),
    ("dcache_missq_4_4_valid", (perfValidCount > (cfg.nMissEntries.U*3.U/4.U))),
  )
  generatePerfEvent()
1239}