xref: /XiangShan/src/main/scala/xiangshan/frontend/icache/IPrefetch.scala (revision b808ac73a4385d9040cc34856c656e2058933e3f)
/***************************************************************************************
  * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
  * Copyright (c) 2020-2021 Peng Cheng Laboratory
  *
  * XiangShan is licensed under Mulan PSL v2.
  * You can use this software according to the terms and conditions of the Mulan PSL v2.
  * You may obtain a copy of Mulan PSL v2 at:
  *          http://license.coscl.org.cn/MulanPSL2
  *
  * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
  * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
  * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
  *
  * See the Mulan PSL v2 for more details.
  ***************************************************************************************/

package xiangshan.frontend.icache

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import difftest._
import freechips.rocketchip.tilelink._
import utils._
import xiangshan.cache.mmu._
import xiangshan.frontend._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}
import huancun.PreferCacheKey
import xiangshan.XSCoreParamsKey
import utility._

abstract class IPrefetchBundle(implicit p: Parameters) extends ICacheBundle
abstract class IPrefetchModule(implicit p: Parameters) extends ICacheModule

class IPrefetchIO(implicit p: Parameters) extends IPrefetchBundle {
  // control
  val csr_pf_enable     = Input(Bool())
  val flush             = Input(Bool())

  val ftqReq            = Flipped(new FtqToPrefetchIO)
  val itlb              = Vec(PortNumber, new TlbRequestIO)
  val pmp               = Vec(PortNumber, new ICachePMPBundle)
  val metaRead          = new ICacheMetaReqBundle
  val MSHRReq           = DecoupledIO(new ICacheMissReq)
  val MSHRResp          = Flipped(ValidIO(new ICacheMissResp))
  val wayLookupWrite    = DecoupledIO(new WayLookupInfo)
}

class IPrefetchPipe(implicit p: Parameters) extends IPrefetchModule {
  val io = IO(new IPrefetchIO)

  val fromFtq = io.ftqReq
  val (toITLB,  fromITLB) = (io.itlb.map(_.req), io.itlb.map(_.resp))
  val (toPMP,  fromPMP)   = (io.pmp.map(_.req), io.pmp.map(_.resp))
  val (toMeta,  fromMeta) = (io.metaRead.toIMeta,  io.metaRead.fromIMeta)
  val (toMSHR, fromMSHR)  = (io.MSHRReq, io.MSHRResp)
  val toWayLookup = io.wayLookupWrite

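  // one-cycle-delayed copy of the CSR prefetch-enable bit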
  val enableBit = RegInit(false.B)
  enableBit := io.csr_pf_enable

  val s0_fire, s1_fire, s2_fire             = WireInit(false.B)
  val s0_discard, s2_discard                = WireInit(false.B)
  val s0_ready, s1_ready, s2_ready          = WireInit(false.B)
  val s0_flush, s1_flush, s2_flush          = WireInit(false.B)
  val from_bpu_s0_flush, from_bpu_s1_flush  = WireInit(false.B)

  /**
    ******************************************************************************
    * IPrefetch Stage 0
    * - 1. receive ftq req
    * - 2. send req to ITLB
    * - 3. send req to Meta SRAM
    ******************************************************************************
    */
  val s0_valid  = fromFtq.req.valid

  /**
    ******************************************************************************
    * receive ftq req
    ******************************************************************************
    */
  val s0_req_vaddr    = VecInit(Seq(fromFtq.req.bits.startAddr, fromFtq.req.bits.nextlineStart))
  val s0_req_ftqIdx   = fromFtq.req.bits.ftqIdx
  val s0_doubleline   = fromFtq.req.bits.crossCacheline
  val s0_req_vSetIdx  = s0_req_vaddr.map(get_idx(_))

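  // s0 is flushed when a BPU s2/s3 redirect invalidates this FTQ entry; an s1 flush also propagates back to s0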
  from_bpu_s0_flush := fromFtq.flushFromBpu.shouldFlushByStage2(s0_req_ftqIdx) ||
                       fromFtq.flushFromBpu.shouldFlushByStage3(s0_req_ftqIdx)
  s0_flush := io.flush || from_bpu_s0_flush || s1_flush

  val s0_can_go = s1_ready && toITLB(0).ready && toITLB(1).ready && toMeta.ready
  fromFtq.req.ready := s0_can_go

  s0_fire := s0_valid && s0_can_go && !s0_flush

  /**
    ******************************************************************************
    * IPrefetch Stage 1
    * - 1. Receive resp from ITLB
    * - 2. Receive resp from IMeta and check
    * - 3. Monitor the requests from missUnit to write to SRAM
    * - 4. Write wayLookup
    ******************************************************************************
    */
  val s1_valid = generatePipeControl(lastFire = s0_fire, thisFire = s1_fire, thisFlush = s1_flush, lastFlush = false.B)

  val s1_req_vaddr    = RegEnable(s0_req_vaddr, 0.U.asTypeOf(s0_req_vaddr), s0_fire)
  val s1_doubleline   = RegEnable(s0_doubleline, 0.U.asTypeOf(s0_doubleline), s0_fire)
  val s1_req_ftqIdx   = RegEnable(s0_req_ftqIdx, 0.U.asTypeOf(s0_req_ftqIdx), s0_fire)
  val s1_req_vSetIdx  = VecInit(s1_req_vaddr.map(get_idx(_)))

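  /* s1 control FSM:
   *  - m_idle:       requests flow through in a single cycle when nothing stalls
   *  - m_itlbResend: ITLB missed; keep the request on the ITLB port until translation finishes
   *  - m_metaResend: translation finished but the meta SRAM port was busy; resend the meta read
   *  - m_enqWay:     waiting to enqueue the lookup result into WayLookup
   *  - m_enterS2:    result enqueued; waiting for s2 to become ready
   */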
  val m_idle :: m_itlbResend :: m_metaResend :: m_enqWay :: m_enterS2 :: Nil = Enum(5)
  val state = RegInit(m_idle)
  val next_state = WireDefault(state)
  val s0_fire_r = RegNext(s0_fire)
  dontTouch(state)
  dontTouch(next_state)
  state := next_state

  /**
    ******************************************************************************
    * resend itlb req if miss
    ******************************************************************************
    */
  val s1_wait_itlb  = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
  (0 until PortNumber).foreach { i =>
    when(s1_flush) {
      s1_wait_itlb(i) := false.B
    }.elsewhen(RegNext(s0_fire) && fromITLB(i).bits.miss) {
      s1_wait_itlb(i) := true.B
    }.elsewhen(s1_wait_itlb(i) && !fromITLB(i).bits.miss) {
      s1_wait_itlb(i) := false.B
    }
  }
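  // s1_need_itlb:    a resend is required this cycle (first response missed, or a pending wait is still missing)
  // tlb_valid_pulse: single-cycle pulse when a port's translation completes
  // tlb_valid_latch: holds that pulse until s1 fires, so late ITLB responses are not lost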
  val s1_need_itlb    = VecInit(Seq((RegNext(s0_fire) || s1_wait_itlb(0)) && fromITLB(0).bits.miss,
                                    (RegNext(s0_fire) || s1_wait_itlb(1)) && fromITLB(1).bits.miss && s1_doubleline))
  val tlb_valid_pulse = VecInit(Seq((RegNext(s0_fire) || s1_wait_itlb(0)) && !fromITLB(0).bits.miss,
                                    (RegNext(s0_fire) || s1_wait_itlb(1)) && !fromITLB(1).bits.miss && s1_doubleline))
  val tlb_valid_latch = VecInit((0 until PortNumber).map(i => ValidHoldBypass(tlb_valid_pulse(i), s1_fire, flush=s1_flush)))
  val itlb_finish     = tlb_valid_latch(0) && (!s1_doubleline || tlb_valid_latch(1))

  for (i <- 0 until PortNumber) {
    toITLB(i).valid             := s1_need_itlb(i) || (s0_valid && (if(i == 0) true.B else s0_doubleline))
    toITLB(i).bits              := DontCare
    toITLB(i).bits.size         := 3.U
    toITLB(i).bits.vaddr        := Mux(s1_need_itlb(i), s1_req_vaddr(i), s0_req_vaddr(i))
    toITLB(i).bits.debug.pc     := Mux(s1_need_itlb(i), s1_req_vaddr(i), s0_req_vaddr(i))
    toITLB(i).bits.cmd          := TlbCmd.exec
    toITLB(i).bits.no_translate := false.B
  }
  fromITLB.foreach(_.ready := true.B)
  io.itlb.foreach(_.req_kill := false.B)

  /**
    ******************************************************************************
    * Receive resp from ITLB
    ******************************************************************************
    */
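  // ITLB results are only valid in the tlb_valid_pulse cycle, so hold paddr/gpaddr/exceptions
  // in registers (with a same-cycle bypass) until s1 fires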
  val s1_req_paddr_wire   = VecInit(fromITLB.map(_.bits.paddr(0)))
  val s1_req_paddr_reg    = VecInit((0 until PortNumber).map(i =>
                                RegEnable(s1_req_paddr_wire(i), 0.U(PAddrBits.W), tlb_valid_pulse(i))))
  val s1_req_paddr        = VecInit((0 until PortNumber).map(i =>
                                Mux(tlb_valid_pulse(i), s1_req_paddr_wire(i), s1_req_paddr_reg(i))))
  val s1_req_gpaddr_tmp   = VecInit((0 until PortNumber).map(i =>
                                ResultHoldBypass(valid = tlb_valid_pulse(i), init = 0.U.asTypeOf(fromITLB(i).bits.gpaddr(0)), data = fromITLB(i).bits.gpaddr(0))))
  val itlbExcpPF          = VecInit((0 until PortNumber).map(i =>
                                ResultHoldBypass(valid = tlb_valid_pulse(i), init = 0.U.asTypeOf(fromITLB(i).bits.excp(0).pf.instr), data = fromITLB(i).bits.excp(0).pf.instr)))
  val itlbExcpGPF         = VecInit((0 until PortNumber).map(i =>
                                ResultHoldBypass(valid = tlb_valid_pulse(i), init = 0.U.asTypeOf(fromITLB(i).bits.excp(0).gpf.instr), data = fromITLB(i).bits.excp(0).gpf.instr)))
  val itlbExcpAF          = VecInit((0 until PortNumber).map(i =>
                                ResultHoldBypass(valid = tlb_valid_pulse(i), init = 0.U.asTypeOf(fromITLB(i).bits.excp(0).af.instr), data = fromITLB(i).bits.excp(0).af.instr)))
  val itlbExcp            = VecInit((0 until PortNumber).map(i => itlbExcpAF(i) || itlbExcpPF(i) || itlbExcpGPF(i)))

  /* Select gpaddr with the first gpf
   * Note: the backend wants the base guest physical address of a fetch block
   *       for port(i), its base gpaddr is actually (gpaddr - i * blocksize)
   *       see GPAMem: https://github.com/OpenXiangShan/XiangShan/blob/344cf5d55568dd40cd658a9ee66047a505eeb504/src/main/scala/xiangshan/backend/GPAMem.scala#L33-L34
   *       see also: https://github.com/OpenXiangShan/XiangShan/blob/344cf5d55568dd40cd658a9ee66047a505eeb504/src/main/scala/xiangshan/frontend/IFU.scala#L374-L375
   */
  val s1_req_gpaddr = PriorityMuxDefault(
    itlbExcpGPF zip (0 until PortNumber).map(i => s1_req_gpaddr_tmp(i) - (i << blockOffBits).U),
    0.U.asTypeOf(s1_req_gpaddr_tmp(0))
  )

  /**
    ******************************************************************************
    * resend metaArray read req when itlb miss finishes
    ******************************************************************************
    */
  val s1_need_meta = ((state === m_itlbResend) && itlb_finish) || (state === m_metaResend)
  toMeta.valid              := s1_need_meta || s0_valid
  toMeta.bits               := DontCare
  toMeta.bits.isDoubleLine  := Mux(s1_need_meta, s1_doubleline, s0_doubleline)

  for (i <- 0 until PortNumber) {
    toMeta.bits.vSetIdx(i)  := Mux(s1_need_meta, s1_req_vSetIdx(i), s0_req_vSetIdx(i))
  }

  /**
    ******************************************************************************
    * Receive resp from IMeta and check
    ******************************************************************************
    */
  val s1_req_ptags    = VecInit(s1_req_paddr.map(get_phy_tag(_)))

  val s1_meta_ptags   = fromMeta.tags
  val s1_meta_valids  = fromMeta.entryValid
  val s1_meta_errors  = VecInit((0 until PortNumber).map( p =>
    // If an error is found in any way, tag_eq_vec is unreliable, so do not use the waymask; use .orR directly instead
    fromMeta.errors(p).asUInt.orR
  ))

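  // one-hot way mask per port: a way hits iff it is valid and its stored tag matches the request's physical tag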
  def get_waymask(paddrs: Vec[UInt]): Vec[UInt] = {
    val ptags         = paddrs.map(get_phy_tag(_))
    val tag_eq_vec    = VecInit((0 until PortNumber).map( p => VecInit((0 until nWays).map( w => s1_meta_ptags(p)(w) === ptags(p)))))
    val tag_match_vec = VecInit((0 until PortNumber).map( k => VecInit(tag_eq_vec(k).zipWithIndex.map{ case(way_tag_eq, w) => way_tag_eq && s1_meta_valids(k)(w)})))
    val waymasks      = VecInit(tag_match_vec.map(_.asUInt))
    waymasks
  }

  val s1_SRAM_waymasks = VecInit((0 until PortNumber).map(i =>
                            Mux(tlb_valid_pulse(i), get_waymask(s1_req_paddr_wire)(i), get_waymask(s1_req_paddr_reg)(i))))

  /**
    ******************************************************************************
    * update waymask according to MSHR update data
    ******************************************************************************
    */
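  /* When the missUnit refills a line into the same set:
   *  - same ptag: the line we are looking up is being refilled, so take the refill's waymask (now a hit)
   *  - same way:  a different line is evicting the way we matched, so the old match is no longer a hit
   */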
  def update_waymask(mask: UInt, vSetIdx: UInt, ptag: UInt): UInt = {
    require(mask.getWidth == nWays)
    val new_mask  = WireInit(mask)
    val valid     = fromMSHR.valid && !fromMSHR.bits.corrupt
    val vset_same = fromMSHR.bits.vSetIdx === vSetIdx
    val ptag_same = getPhyTagFromBlk(fromMSHR.bits.blkPaddr) === ptag
    val way_same  = fromMSHR.bits.waymask === mask
    when(valid && vset_same) {
      when(ptag_same) {
        new_mask := fromMSHR.bits.waymask
      }.elsewhen(way_same) {
        new_mask := 0.U
      }
    }
    new_mask
  }

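  // the meta SRAM result is valid one cycle after a read fires (the s0 request or a resend)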
  val s1_SRAM_valid = s0_fire_r || RegNext(s1_need_meta && toMeta.ready)
  val s1_MSHR_valid = fromMSHR.valid && !fromMSHR.bits.corrupt
  val s1_waymasks   = WireInit(VecInit(Seq.fill(PortNumber)(0.U(nWays.W))))
  val s1_waymasks_r = RegEnable(s1_waymasks, 0.U.asTypeOf(s1_waymasks), s1_SRAM_valid || s1_MSHR_valid)
  (0 until PortNumber).foreach{i =>
    val old_waymask = Mux(s1_SRAM_valid, s1_SRAM_waymasks(i), s1_waymasks_r(i))
    s1_waymasks(i) := update_waymask(old_waymask, s1_req_vSetIdx(i), s1_req_ptags(i))
  }

  /**
    ******************************************************************************
    * send enqueue req to WayLookup
    ******************************************************************************
    */
  // Disallow enqueuing into wayLookup while an SRAM write from the missUnit is in flight.
  toWayLookup.valid             := ((state === m_enqWay) || ((state === m_idle) && itlb_finish)) && !s1_flush && !fromMSHR.valid
  toWayLookup.bits.vSetIdx      := s1_req_vSetIdx
  toWayLookup.bits.waymask      := s1_waymasks
  toWayLookup.bits.ptag         := s1_req_ptags
  toWayLookup.bits.gpaddr       := s1_req_gpaddr
  (0 until PortNumber).foreach { i =>
    // exceptions on the first line are always valid; on the second line only for doubleline requests
    val excpValid = (if (i == 0) true.B else s1_doubleline)
    toWayLookup.bits.excp_tlb_af(i)  := excpValid && itlbExcpAF(i)
    toWayLookup.bits.excp_tlb_pf(i)  := excpValid && itlbExcpPF(i)
    toWayLookup.bits.excp_tlb_gpf(i) := excpValid && itlbExcpGPF(i)
    toWayLookup.bits.meta_errors(i)  := excpValid && s1_meta_errors(i)
  }

  val s1_waymasks_vec = s1_waymasks.map(_.asTypeOf(Vec(nWays, Bool())))
  when(toWayLookup.fire) {
    assert(PopCount(s1_waymasks_vec(0)) <= 1.U && (PopCount(s1_waymasks_vec(1)) <= 1.U || !s1_doubleline),
      "Multiple hit in main pipe, port0:is=%d,ptag=0x%x,vidx=0x%x,vaddr=0x%x port1:is=%d,ptag=0x%x,vidx=0x%x,vaddr=0x%x ",
      PopCount(s1_waymasks_vec(0)) > 1.U, s1_req_ptags(0), get_idx(s1_req_vaddr(0)), s1_req_vaddr(0),
      PopCount(s1_waymasks_vec(1)) > 1.U && s1_doubleline, s1_req_ptags(1), get_idx(s1_req_vaddr(1)), s1_req_vaddr(1))
  }

  /**
    ******************************************************************************
    * PMP check
    ******************************************************************************
    */
  toPMP.zipWithIndex.foreach { case (p, i) =>
    p.valid     := s1_valid
    p.bits.addr := s1_req_paddr(i)
    p.bits.size := 3.U // TODO
    p.bits.cmd  := TlbCmd.exec
  }
  val pmpExcp = VecInit((0 until PortNumber).map( i => fromPMP(i).instr || fromPMP(i).mmio ))

  /**
    ******************************************************************************
    * state machine
    ******************************************************************************
    */

  switch(state) {
    is(m_idle) {
      when(s1_valid && !itlb_finish) {
        next_state := m_itlbResend
      }.elsewhen(s1_valid && itlb_finish && !toWayLookup.fire) {
        next_state := m_enqWay
      }.elsewhen(s1_valid && itlb_finish && toWayLookup.fire && !s2_ready) {
        next_state := m_enterS2
      }
    }
    is(m_itlbResend) {
      when(itlb_finish && !toMeta.ready) {
        next_state := m_metaResend
      }.elsewhen(itlb_finish && toMeta.ready) {
        next_state := m_enqWay
      }
    }
    is(m_metaResend) {
      when(toMeta.ready) {
        next_state := m_enqWay
      }
    }
    is(m_enqWay) {
      when(toWayLookup.fire && !s2_ready) {
        next_state := m_enterS2
      }.elsewhen(toWayLookup.fire && s2_ready) {
        next_state := m_idle
      }
    }
    is(m_enterS2) {
      when(s2_ready) {
        next_state := m_idle
      }
    }
  }

  when(s1_flush) {
    next_state := m_idle
  }

  /** Stage 1 control */
  from_bpu_s1_flush := s1_valid && fromFtq.flushFromBpu.shouldFlushByStage3(s1_req_ftqIdx)
  s1_flush := io.flush || from_bpu_s1_flush

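  // s1 can pass the request on (and accept a new one) only when the FSM returns to idle next cycle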
  s1_ready      := next_state === m_idle
  s1_fire       := (next_state === m_idle) && s1_valid && !s1_flush

  /**
    ******************************************************************************
    * IPrefetch Stage 2
    * - 1. Monitor the requests from missUnit to write to SRAM
    * - 2. send req to missUnit
    ******************************************************************************
    */
  val s2_valid  = generatePipeControl(lastFire = s1_fire, thisFire = s2_fire, thisFlush = s2_flush, lastFlush = false.B)

  val s2_req_vaddr    = RegEnable(s1_req_vaddr, 0.U.asTypeOf(s1_req_vaddr), s1_fire)
  val s2_doubleline   = RegEnable(s1_doubleline, 0.U.asTypeOf(s1_doubleline), s1_fire)
  val s2_req_paddr    = RegEnable(s1_req_paddr, 0.U.asTypeOf(s1_req_paddr), s1_fire)

  val s2_pmpExcp      = RegEnable(pmpExcp, 0.U.asTypeOf(pmpExcp), s1_fire)
  val s2_itlbExcp     = RegEnable(itlbExcp, 0.U.asTypeOf(itlbExcp), s1_fire)
  val s2_waymasks     = RegEnable(s1_waymasks, 0.U.asTypeOf(s1_waymasks), s1_fire)

  val s2_req_vSetIdx  = s2_req_vaddr.map(get_idx(_))
  val s2_req_ptags    = s2_req_paddr.map(get_phy_tag(_))

  /**
    ******************************************************************************
    * Monitor the requests from missUnit to write to SRAM
    ******************************************************************************
    */

  /* NOTE: If fromMSHR.bits.corrupt, we should set s2_MSHR_hits to false.B and send the prefetch request again.
   * This is the opposite of how mainPipe handles fromMSHR.bits.corrupt,
   *   where s2_MSHR_hits is set to true.B and the error is forwarded to the IFU.
   */
  val s2_MSHR_match = VecInit((0 until PortNumber).map(i =>
    (s2_req_vSetIdx(i) === fromMSHR.bits.vSetIdx) &&
    (s2_req_ptags(i) === getPhyTagFromBlk(fromMSHR.bits.blkPaddr)) &&
    s2_valid && fromMSHR.valid && !fromMSHR.bits.corrupt
  ))
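  // remember a refill match until s2 finishes or is flushed, so a single-cycle MSHR response is not lost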
  val s2_MSHR_hits = (0 until PortNumber).map(i => ValidHoldBypass(s2_MSHR_match(i), s2_fire || s2_flush))

  val s2_SRAM_hits = s2_waymasks.map(_.orR)
  val s2_hits = VecInit((0 until PortNumber).map(i => s2_MSHR_hits(i) || s2_SRAM_hits(i)))

  // pmpExcp covers both access fault and mmio, neither of which should be prefetched;
  // if port 0 has an exception, port 1 must not be prefetched either, so a port misses iff
  // it does not hit, it is needed, and no exception was found on this or any earlier port
  val s2_miss = VecInit((0 until PortNumber).map { i =>
    !s2_hits(i) && (if (i==0) true.B else s2_doubleline) &&
      !s2_itlbExcp.take(i+1).reduce(_||_) && !s2_pmpExcp.take(i+1).reduce(_||_)
  })

  /**
    ******************************************************************************
    * send req to missUnit
    ******************************************************************************
    */
  val toMSHRArbiter = Module(new Arbiter(new ICacheMissReq, PortNumber))

  // Track which ports have already sent a miss request, to avoid sending duplicates.
  val has_send = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
  (0 until PortNumber).foreach{ i =>
    when(s1_fire) {
      has_send(i) := false.B
    }.elsewhen(toMSHRArbiter.io.in(i).fire) {
      has_send(i) := true.B
    }
  }

  (0 until PortNumber).foreach{ i =>
    toMSHRArbiter.io.in(i).valid          := s2_valid && s2_miss(i) && !has_send(i)
    toMSHRArbiter.io.in(i).bits.blkPaddr  := getBlkAddr(s2_req_paddr(i))
    toMSHRArbiter.io.in(i).bits.vSetIdx   := s2_req_vSetIdx(i)
  }

  toMSHR <> toMSHRArbiter.io.out

  s2_flush := io.flush

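  // s2 finishes once every port has either already sent its miss request, is sending it this cycle, or does not need one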
  val s2_finish  = (0 until PortNumber).map(i => has_send(i) || !s2_miss(i) || toMSHRArbiter.io.in(i).fire).reduce(_&&_)
  s2_ready      := s2_finish || !s2_valid
  s2_fire       := s2_valid && s2_finish && !s2_flush

  /** PerfAccumulate */
  // the number of prefetch requests received from ftq
  XSPerfAccumulate("prefetch_req_receive", fromFtq.req.fire)
  // the number of prefetch requests sent to missUnit
  XSPerfAccumulate("prefetch_req_send", toMSHR.fire)
  XSPerfAccumulate("to_missUnit_stall", toMSHR.valid && !toMSHR.ready)
  /**
    * Count the number of requests that are filtered out for various reasons.
    * The discard counters may be slightly larger than the number of requests
    * actually discarded, because a cancelled request can have multiple
    * discard reasons at the same time.
    */
  // discard prefetch request by flush
  // XSPerfAccumulate("fdip_prefetch_discard_by_tlb_except",  p1_discard && p1_tlb_except)
  // // discard prefetch request by hit icache SRAM
  // XSPerfAccumulate("fdip_prefetch_discard_by_hit_cache",   p2_discard && p1_meta_hit)
  // // discard prefetch request by hit write SRAM
  // XSPerfAccumulate("fdip_prefetch_discard_by_p1_monoitor", p1_discard && p1_monitor_hit)
  // // discard prefetch request by pmp except or mmio
  // XSPerfAccumulate("fdip_prefetch_discard_by_pmp",         p2_discard && p2_pmp_except)
  // // discard prefetch request by hit mainPipe info
  // // XSPerfAccumulate("fdip_prefetch_discard_by_mainPipe",    p2_discard && p2_mainPipe_hit)
}