/***************************************************************************************
  * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
  * Copyright (c) 2020-2021 Peng Cheng Laboratory
  *
  * XiangShan is licensed under Mulan PSL v2.
  * You can use this software according to the terms and conditions of the Mulan PSL v2.
  * You may obtain a copy of Mulan PSL v2 at:
  *          http://license.coscl.org.cn/MulanPSL2
  *
  * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
  * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
  * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
  *
  * See the Mulan PSL v2 for more details.
  ***************************************************************************************/

package xiangshan.frontend.icache

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import difftest._
import freechips.rocketchip.tilelink._
import utils._
import xiangshan.cache.mmu._
import xiangshan.frontend._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}
import huancun.PreferCacheKey
import xiangshan.XSCoreParamsKey
import xiangshan.SoftIfetchPrefetchBundle
import utility._

abstract class IPrefetchBundle(implicit p: Parameters) extends ICacheBundle
abstract class IPrefetchModule(implicit p: Parameters) extends ICacheModule

class IPrefetchReq(implicit p: Parameters) extends IPrefetchBundle {
  val startAddr     : UInt   = UInt(VAddrBits.W)
  val nextlineStart : UInt   = UInt(VAddrBits.W)
  val ftqIdx        : FtqPtr = new FtqPtr
  val isSoftPrefetch: Bool   = Bool()
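  // crossCacheline is true when the highest bit of the in-line offset of startAddr is set, i.e.
  // the request starts in the upper half of its cacheline, so the fetch block spills into the
  // next line and nextlineStart is also needed. A hedged example, assuming 64-byte cachelines
  // (blockOffBits = 6) and 32-byte fetch blocks: startAddr = 0x1030 has bit 5 set, so the block
  // [0x1030, 0x1050) crosses into the line starting at 0x1040.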
  def crossCacheline: Bool   = startAddr(blockOffBits - 1) === 1.U

  def fromFtqICacheInfo(info: FtqICacheInfo): IPrefetchReq = {
    this.startAddr := info.startAddr
    this.nextlineStart := info.nextlineStart
    this.ftqIdx := info.ftqIdx
    this.isSoftPrefetch := false.B
    this
  }

  def fromSoftPrefetch(req: SoftIfetchPrefetchBundle): IPrefetchReq = {
    this.startAddr := req.vaddr
    this.nextlineStart := req.vaddr + (1 << blockOffBits).U
    this.ftqIdx := DontCare
    this.isSoftPrefetch := true.B
    this
  }
}
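
// Usage sketch for the two constructors above (the actual wiring lives in the ICache top level;
// `ftqReq` and `softReq` are illustrative names, not identifiers from this file):
//   io.req.bits.fromFtqICacheInfo(ftqReq)   // hardware prefetch request coming from the FTQ
//   io.req.bits.fromSoftPrefetch(softReq)   // software instruction-prefetch request from the backend
// Both helpers assign the fields of `this` and return it, so they can be used as connection payloads.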

class IPrefetchIO(implicit p: Parameters) extends IPrefetchBundle {
  // control
  val csr_pf_enable     = Input(Bool())
  val csr_parity_enable = Input(Bool())
  val flush             = Input(Bool())

  val req               = Flipped(Decoupled(new IPrefetchReq))
  val flushFromBpu      = Flipped(new BpuFlushInfo)
  val itlb              = Vec(PortNumber, new TlbRequestIO)
  val pmp               = Vec(PortNumber, new ICachePMPBundle)
  val metaRead          = new ICacheMetaReqBundle
  val MSHRReq           = DecoupledIO(new ICacheMissReq)
  val MSHRResp          = Flipped(ValidIO(new ICacheMissResp))
  val wayLookupWrite    = DecoupledIO(new WayLookupInfo)
}

class IPrefetchPipe(implicit p: Parameters) extends IPrefetchModule {
  val io: IPrefetchIO = IO(new IPrefetchIO)

  val (toITLB,  fromITLB) = (io.itlb.map(_.req), io.itlb.map(_.resp))
  val (toPMP,  fromPMP)   = (io.pmp.map(_.req), io.pmp.map(_.resp))
  val (toMeta,  fromMeta) = (io.metaRead.toIMeta,  io.metaRead.fromIMeta)
  val (toMSHR, fromMSHR)  = (io.MSHRReq, io.MSHRResp)
  val toWayLookup = io.wayLookupWrite

  val s0_fire, s1_fire, s2_fire             = WireInit(false.B)
  val s0_discard, s2_discard                = WireInit(false.B)
  val s0_ready, s1_ready, s2_ready          = WireInit(false.B)
  val s0_flush, s1_flush, s2_flush          = WireInit(false.B)
  val from_bpu_s0_flush, from_bpu_s1_flush  = WireInit(false.B)

  /**
    ******************************************************************************
    * IPrefetch Stage 0
    * - 1. receive ftq req
    * - 2. send req to ITLB
    * - 3. send req to Meta SRAM
    ******************************************************************************
    */
  val s0_valid  = io.req.valid

  /**
    ******************************************************************************
    * receive ftq req
    ******************************************************************************
    */
  val s0_req_vaddr    = VecInit(Seq(io.req.bits.startAddr, io.req.bits.nextlineStart))
  val s0_req_ftqIdx   = io.req.bits.ftqIdx
  val s0_isSoftPrefetch = io.req.bits.isSoftPrefetch
  val s0_doubleline   = io.req.bits.crossCacheline
  val s0_req_vSetIdx  = s0_req_vaddr.map(get_idx)

  from_bpu_s0_flush := !s0_isSoftPrefetch && (io.flushFromBpu.shouldFlushByStage2(s0_req_ftqIdx) ||
                                              io.flushFromBpu.shouldFlushByStage3(s0_req_ftqIdx))
  s0_flush := io.flush || from_bpu_s0_flush || s1_flush

  val s0_can_go = s1_ready && toITLB(0).ready && toITLB(1).ready && toMeta.ready
  io.req.ready := s0_can_go

  s0_fire := s0_valid && s0_can_go && !s0_flush

  /**
    ******************************************************************************
    * IPrefetch Stage 1
    * - 1. Receive resp from ITLB
    * - 2. Receive resp from IMeta and check
    * - 3. Monitor the requests from missUnit to write to SRAM.
    * - 4. Write wayLookup
    ******************************************************************************
    */
  val s1_valid = generatePipeControl(lastFire = s0_fire, thisFire = s1_fire, thisFlush = s1_flush, lastFlush = false.B)
132
133  val s1_req_vaddr    = RegEnable(s0_req_vaddr, 0.U.asTypeOf(s0_req_vaddr), s0_fire)
134  val s1_isSoftPrefetch = RegEnable(s0_isSoftPrefetch, 0.U.asTypeOf(s0_isSoftPrefetch), s0_fire)
135  val s1_doubleline   = RegEnable(s0_doubleline, 0.U.asTypeOf(s0_doubleline), s0_fire)
136  val s1_req_ftqIdx   = RegEnable(s0_req_ftqIdx, 0.U.asTypeOf(s0_req_ftqIdx), s0_fire)
137  val s1_req_vSetIdx  = VecInit(s1_req_vaddr.map(get_idx))
138
139  val m_idle :: m_itlbResend :: m_metaResend :: m_enqWay :: m_enterS2 :: Nil = Enum(5)
140  val state = RegInit(m_idle)
141  val next_state = WireDefault(state)
142  val s0_fire_r = RegNext(s0_fire)
143  dontTouch(state)
144  dontTouch(next_state)
145  state := next_state
146

  /**
    ******************************************************************************
    * resend itlb req if miss
    ******************************************************************************
    */
  val s1_wait_itlb  = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
  (0 until PortNumber).foreach { i =>
    when(s1_flush) {
      s1_wait_itlb(i) := false.B
    }.elsewhen(RegNext(s0_fire) && fromITLB(i).bits.miss) {
      s1_wait_itlb(i) := true.B
    }.elsewhen(s1_wait_itlb(i) && !fromITLB(i).bits.miss) {
      s1_wait_itlb(i) := false.B
    }
  }
  val s1_need_itlb    = VecInit(Seq((RegNext(s0_fire) || s1_wait_itlb(0)) && fromITLB(0).bits.miss,
                                    (RegNext(s0_fire) || s1_wait_itlb(1)) && fromITLB(1).bits.miss && s1_doubleline))
  val tlb_valid_pulse = VecInit(Seq((RegNext(s0_fire) || s1_wait_itlb(0)) && !fromITLB(0).bits.miss,
                                    (RegNext(s0_fire) || s1_wait_itlb(1)) && !fromITLB(1).bits.miss && s1_doubleline))
  val tlb_valid_latch = VecInit((0 until PortNumber).map(i => ValidHoldBypass(tlb_valid_pulse(i), s1_fire, flush=s1_flush)))
  val itlb_finish     = tlb_valid_latch(0) && (!s1_doubleline || tlb_valid_latch(1))
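  // s1_wait_itlb(i): port i is still waiting for a non-miss itlb response after the response in the
  //   cycle following s0_fire reported a miss; it drives the resend requests (s1_need_itlb) below.
  // tlb_valid_pulse(i): a single-cycle pulse in the cycle port i's translation actually completes.
  // tlb_valid_latch(i): holds that pulse (via ValidHoldBypass) until s1 fires or is flushed, so a
  //   port that finished early is not lost while the other port is still being resent.
  // itlb_finish: all required ports are translated (port 1 only matters for doubleline requests).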

  for (i <- 0 until PortNumber) {
    toITLB(i).valid             := s1_need_itlb(i) || (s0_valid && (if(i == 0) true.B else s0_doubleline))
    toITLB(i).bits              := DontCare
    toITLB(i).bits.size         := 3.U
    toITLB(i).bits.vaddr        := Mux(s1_need_itlb(i), s1_req_vaddr(i), s0_req_vaddr(i))
    toITLB(i).bits.debug.pc     := Mux(s1_need_itlb(i), s1_req_vaddr(i), s0_req_vaddr(i))
    toITLB(i).bits.cmd          := TlbCmd.exec
    toITLB(i).bits.no_translate := false.B
  }
  fromITLB.foreach(_.ready := true.B)
  io.itlb.foreach(_.req_kill := false.B)

  /**
    ******************************************************************************
    * Receive resp from ITLB
    ******************************************************************************
    */
  val s1_req_paddr_wire     = VecInit(fromITLB.map(_.bits.paddr(0)))
  val s1_req_paddr_reg      = VecInit((0 until PortNumber).map( i =>
    RegEnable(s1_req_paddr_wire(i), 0.U(PAddrBits.W), tlb_valid_pulse(i))
  ))
  val s1_req_paddr          = VecInit((0 until PortNumber).map( i =>
    Mux(tlb_valid_pulse(i), s1_req_paddr_wire(i), s1_req_paddr_reg(i))
  ))
  val s1_req_gpaddr_tmp     = VecInit((0 until PortNumber).map( i =>
    ResultHoldBypass(valid = tlb_valid_pulse(i), init = 0.U.asTypeOf(fromITLB(i).bits.gpaddr(0)), data = fromITLB(i).bits.gpaddr(0))
  ))
  val s1_req_isForVSnonLeafPTE_tmp    = VecInit((0 until PortNumber).map( i =>
    ResultHoldBypass(valid = tlb_valid_pulse(i), init = 0.U.asTypeOf(fromITLB(i).bits.isForVSnonLeafPTE), data = fromITLB(i).bits.isForVSnonLeafPTE)
  ))
  val s1_itlb_exception     = VecInit((0 until PortNumber).map( i =>
    ResultHoldBypass(valid = tlb_valid_pulse(i), init = 0.U(ExceptionType.width.W), data = ExceptionType.fromTlbResp(fromITLB(i).bits))
  ))
  val s1_itlb_pbmt          = VecInit((0 until PortNumber).map( i =>
    ResultHoldBypass(valid = tlb_valid_pulse(i), init = 0.U.asTypeOf(fromITLB(i).bits.pbmt(0)), data = fromITLB(i).bits.pbmt(0))
  ))
  val s1_itlb_exception_gpf = VecInit(s1_itlb_exception.map(_ === ExceptionType.gpf))

  /* Select gpaddr with the first gpf
   * Note: the backend wants the base guest physical address of a fetch block
   *       for port(i), its base gpaddr is actually (gpaddr - i * blocksize)
   *       see GPAMem: https://github.com/OpenXiangShan/XiangShan/blob/344cf5d55568dd40cd658a9ee66047a505eeb504/src/main/scala/xiangshan/backend/GPAMem.scala#L33-L34
   *       see also: https://github.com/OpenXiangShan/XiangShan/blob/344cf5d55568dd40cd658a9ee66047a505eeb504/src/main/scala/xiangshan/frontend/IFU.scala#L374-L375
   */
  val s1_req_gpaddr = PriorityMuxDefault(
    s1_itlb_exception_gpf zip (0 until PortNumber).map(i => s1_req_gpaddr_tmp(i) - (i << blockOffBits).U),
    0.U.asTypeOf(s1_req_gpaddr_tmp(0))
  )

  val s1_req_isForVSnonLeafPTE = PriorityMuxDefault(
    s1_itlb_exception_gpf zip s1_req_isForVSnonLeafPTE_tmp,
    0.U.asTypeOf(s1_req_isForVSnonLeafPTE_tmp(0))
  )
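  // A hedged example of the adjustment above, assuming 64-byte cachelines (blockOffBits = 6):
  // if only port 1 raises a guest page fault at gpaddr G, the value forwarded to WayLookup is
  // G - 0x40, i.e. the gpaddr corresponding to the first cacheline of the fetch block, which is
  // the form GPAMem/IFU expect (see the links in the comment above).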

  /**
    ******************************************************************************
    * resend metaArray read req when itlb miss finishes
    ******************************************************************************
    */
  val s1_need_meta = ((state === m_itlbResend) && itlb_finish) || (state === m_metaResend)
  toMeta.valid              := s1_need_meta || s0_valid
  toMeta.bits               := DontCare
  toMeta.bits.isDoubleLine  := Mux(s1_need_meta, s1_doubleline, s0_doubleline)

  for (i <- 0 until PortNumber) {
    toMeta.bits.vSetIdx(i)  := Mux(s1_need_meta, s1_req_vSetIdx(i), s0_req_vSetIdx(i))
  }

  /**
    ******************************************************************************
    * Receive resp from IMeta and check
    ******************************************************************************
    */
  val s1_req_ptags    = VecInit(s1_req_paddr.map(get_phy_tag))

  val s1_meta_ptags   = fromMeta.tags
  val s1_meta_valids  = fromMeta.entryValid

  def get_waymask(paddrs: Vec[UInt]): Vec[UInt] = {
    val ptags         = paddrs.map(get_phy_tag)
    val tag_eq_vec    = VecInit((0 until PortNumber).map( p => VecInit((0 until nWays).map( w => s1_meta_ptags(p)(w) === ptags(p)))))
    val tag_match_vec = VecInit((0 until PortNumber).map( k => VecInit(tag_eq_vec(k).zipWithIndex.map{ case(way_tag_eq, w) => way_tag_eq && s1_meta_valids(k)(w)})))
    val waymasks      = VecInit(tag_match_vec.map(_.asUInt))
    waymasks
  }
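  // get_waymask: for each port, compare the request's physical tag against the tag read from the
  // meta SRAM in every way and gate the comparison with the per-way valid bits; the result is an
  // nWays-bit vector per port that is one-hot on a hit and all-zero on a miss (more than one bit
  // set would mean a multi-hit, which is checked by the assertion on toWayLookup.fire below).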

  val s1_SRAM_waymasks = VecInit((0 until PortNumber).map { port =>
    Mux(tlb_valid_pulse(port), get_waymask(s1_req_paddr_wire)(port), get_waymask(s1_req_paddr_reg)(port))
  })

  // select ecc code
  /* NOTE:
   * When ECC check fails, s1_waymasks may be corrupted, so this selected meta_codes may be wrong.
   * However, we can still guarantee that the request sent to the L2 cache and the response to the IFU are both correct:
   * since the probability of an abnormal bit flip is very small, we only consider up to 1 bit being wrong.
   * 1. miss -> fake hit: The wrong bit in s1_waymasks was set to true.B, thus selects the wrong meta_codes,
   *                      but we can detect this by checking whether `encodeMetaECC(req_ptags) === meta_codes`.
   * 2. hit -> fake multi-hit: In normal situations, multi-hit never happens, so a multi-hit indicates ECC failure;
   *                           we can detect this by checking whether `PopCount(waymasks) <= 1.U`,
   *                           and meta_codes is not important in this situation.
   * 3. hit -> fake miss: We can't detect this, but we can (pre)fetch the correct data from the L2 cache, so it's not a problem.
   * 4. hit -> hit / miss -> miss: ECC failure happens in an irrelevant way, so we don't care about it this time.
   */
  val s1_SRAM_meta_codes = VecInit((0 until PortNumber).map { port =>
    Mux1H(s1_SRAM_waymasks(port), fromMeta.codes(port))
  })

  /**
    ******************************************************************************
    * update waymasks and meta_codes according to MSHR update data
    ******************************************************************************
    */
  def update_meta_info(mask: UInt, vSetIdx: UInt, ptag: UInt, code: UInt): Tuple2[UInt, UInt] = {
    require(mask.getWidth == nWays)
    val new_mask  = WireInit(mask)
    val new_code  = WireInit(code)
    val valid = fromMSHR.valid && !fromMSHR.bits.corrupt
    val vset_same = fromMSHR.bits.vSetIdx === vSetIdx
    val ptag_same = getPhyTagFromBlk(fromMSHR.bits.blkPaddr) === ptag
    val way_same  = fromMSHR.bits.waymask === mask
    when(valid && vset_same) {
      when(ptag_same) {
        new_mask := fromMSHR.bits.waymask
        // also update meta_codes
        // we have getPhyTagFromBlk(fromMSHR.bits.blkPaddr) === ptag, so we can use ptag directly for better timing
        new_code := encodeMetaECC(ptag)
      }.elsewhen(way_same) {
        new_mask := 0.U
        // we don't care about new_code, since it's not used for a missed request
      }
    }
    (new_mask, new_code)
  }
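  // update_meta_info keeps s1's view of the meta array coherent with refills that the missUnit
  // writes while this request sits in s1:
  //   - same set, same tag: the line has just been refilled, so adopt the refill's waymask and
  //     recompute the ECC code from our own ptag (which equals the refilled tag).
  //   - same set, same way, different tag: the way we hit in has been overwritten by another
  //     line, so clear the mask and treat it as a miss (new_code is then unused).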

  val s1_SRAM_valid = s0_fire_r || RegNext(s1_need_meta && toMeta.ready)
  val s1_MSHR_valid = fromMSHR.valid && !fromMSHR.bits.corrupt
  val s1_waymasks   = WireInit(VecInit(Seq.fill(PortNumber)(0.U(nWays.W))))
  val s1_waymasks_r = RegEnable(s1_waymasks, 0.U.asTypeOf(s1_waymasks), s1_SRAM_valid || s1_MSHR_valid)
  val s1_meta_codes   = WireInit(VecInit(Seq.fill(PortNumber)(0.U(ICacheMetaCodeBits.W))))
  val s1_meta_codes_r = RegEnable(s1_meta_codes, 0.U.asTypeOf(s1_meta_codes), s1_SRAM_valid || s1_MSHR_valid)

  // update waymasks and meta_codes
  (0 until PortNumber).foreach{i =>
    val old_waymask    = Mux(s1_SRAM_valid, s1_SRAM_waymasks(i),   s1_waymasks_r(i))
    val old_meta_codes = Mux(s1_SRAM_valid, s1_SRAM_meta_codes(i), s1_meta_codes_r(i))
    val new_info = update_meta_info(old_waymask, s1_req_vSetIdx(i), s1_req_ptags(i), old_meta_codes)
    s1_waymasks(i)   := new_info._1
    s1_meta_codes(i) := new_info._2
  }

  /**
    ******************************************************************************
    * send enqueue req to WayLookup
    ******************************************************************************
    */
  // Disallow enqueuing wayLookup when SRAM write occurs.
  toWayLookup.valid             := ((state === m_enqWay) || ((state === m_idle) && itlb_finish)) &&
    !s1_flush && !fromMSHR.valid && !s1_isSoftPrefetch  // do not enqueue soft prefetch
  toWayLookup.bits.vSetIdx      := s1_req_vSetIdx
  toWayLookup.bits.waymask      := s1_waymasks
  toWayLookup.bits.ptag         := s1_req_ptags
  toWayLookup.bits.gpaddr       := s1_req_gpaddr
  toWayLookup.bits.isForVSnonLeafPTE      := s1_req_isForVSnonLeafPTE
  toWayLookup.bits.meta_codes   := s1_meta_codes
  (0 until PortNumber).foreach { i =>
    val excpValid = (if (i == 0) true.B else s1_doubleline)  // exception in the first line is always valid; in the second line it is valid only for doubleline requests
    // Send s1_itlb_exception to WayLookup (instead of s1_exception_out) for better timing. Will check pmp again in mainPipe
    toWayLookup.bits.itlb_exception(i) := Mux(excpValid, s1_itlb_exception(i), ExceptionType.none)
    toWayLookup.bits.itlb_pbmt(i)      := Mux(excpValid, s1_itlb_pbmt(i), Pbmt.pma)
  }

  val s1_waymasks_vec = s1_waymasks.map(_.asTypeOf(Vec(nWays, Bool())))
  when(toWayLookup.fire) {
    assert(PopCount(s1_waymasks_vec(0)) <= 1.U && (PopCount(s1_waymasks_vec(1)) <= 1.U || !s1_doubleline),
      "Multiple hit in main pipe, port0:is=%d,ptag=0x%x,vidx=0x%x,vaddr=0x%x port1:is=%d,ptag=0x%x,vidx=0x%x,vaddr=0x%x ",
      PopCount(s1_waymasks_vec(0)) > 1.U, s1_req_ptags(0), get_idx(s1_req_vaddr(0)), s1_req_vaddr(0),
      PopCount(s1_waymasks_vec(1)) > 1.U && s1_doubleline, s1_req_ptags(1), get_idx(s1_req_vaddr(1)), s1_req_vaddr(1))
  }

  /**
    ******************************************************************************
    * PMP check
    ******************************************************************************
    */
  toPMP.zipWithIndex.foreach { case (p, i) =>
    // if itlb has exception, paddr can be invalid, therefore pmp check can be skipped
    p.valid     := s1_valid // && s1_itlb_exception === ExceptionType.none
    p.bits.addr := s1_req_paddr(i)
    p.bits.size := 3.U // TODO
    p.bits.cmd  := TlbCmd.exec
  }
  val s1_pmp_exception = VecInit(fromPMP.map(ExceptionType.fromPMPResp))
  val s1_pmp_mmio      = VecInit(fromPMP.map(_.mmio))

  // merge s1 itlb/pmp exceptions, itlb has the highest priority, pmp next
  // for timing consideration, meta_corrupt is not merged, and it will NOT cancel prefetch
  val s1_exception_out = ExceptionType.merge(
    s1_itlb_exception,
    s1_pmp_exception
  )

  // merge pmp mmio and itlb pbmt
  val s1_mmio = VecInit((s1_pmp_mmio zip s1_itlb_pbmt).map{ case (mmio, pbmt) =>
    mmio || Pbmt.isUncache(pbmt)
  })

  /**
    ******************************************************************************
    * state machine
    ******************************************************************************
    */

  switch(state) {
    is(m_idle) {
      when(s1_valid) {
        when(!itlb_finish) {
          next_state := m_itlbResend
        }.elsewhen(!toWayLookup.fire) {  // itlb_finish
          next_state := m_enqWay
        }.elsewhen(!s2_ready) {  // itlb_finish && toWayLookup.fire
          next_state := m_enterS2
        } // .otherwise { next_state := m_idle }
      } // .otherwise { next_state := m_idle }  // !s1_valid
    }
    is(m_itlbResend) {
      when(itlb_finish) {
        when(!toMeta.ready) {
          next_state := m_metaResend
        }.otherwise { // toMeta.ready
          next_state := m_enqWay
        }
      } // .otherwise { next_state := m_itlbResend }  // !itlb_finish
    }
    is(m_metaResend) {
      when(toMeta.ready) {
        next_state := m_enqWay
      } // .otherwise { next_state := m_metaResend }  // !toMeta.ready
    }
    is(m_enqWay) {
      when(toWayLookup.fire || s1_isSoftPrefetch) {
        when (!s2_ready) {
          next_state := m_enterS2
        }.otherwise {  // s2_ready
          next_state := m_idle
        }
      } // .otherwise { next_state := m_enqWay }
    }
    is(m_enterS2) {
      when(s2_ready) {
        next_state := m_idle
      }
    }
  }

  when(s1_flush) {
    next_state := m_idle
  }
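  // FSM summary: the state machine tracks why a request is stuck in s1.
  //   m_idle       - nothing pending; a request that finishes translation, enqueues WayLookup and
  //                  finds s2 ready passes through without ever leaving m_idle.
  //   m_itlbResend - an itlb port missed; keep resending until translation finishes.
  //   m_metaResend - translation finished but the meta SRAM read port was busy; retry the read.
  //   m_enqWay     - waiting to enqueue WayLookup (a soft prefetch leaves this state immediately,
  //                  since it never enqueues).
  //   m_enterS2    - everything done, only waiting for s2 to become ready.
  // Any s1 flush forces the FSM back to m_idle.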

  /** Stage 1 control */
  from_bpu_s1_flush := s1_valid && !s1_isSoftPrefetch && io.flushFromBpu.shouldFlushByStage3(s1_req_ftqIdx)
  s1_flush := io.flush || from_bpu_s1_flush

  s1_ready      := next_state === m_idle
  s1_fire       := (next_state === m_idle) && s1_valid && !s1_flush  // used to clear s1_valid & itlb_valid_latch
  val s1_real_fire = s1_fire && io.csr_pf_enable                     // the real "s1 fire" with which s1 enters s2

  /**
    ******************************************************************************
    * IPrefetch Stage 2
    * - 1. Monitor the requests from missUnit to write to SRAM.
    * - 2. send req to missUnit
    ******************************************************************************
    */
  val s2_valid  = generatePipeControl(lastFire = s1_real_fire, thisFire = s2_fire, thisFlush = s2_flush, lastFlush = false.B)

  val s2_req_vaddr    = RegEnable(s1_req_vaddr,     0.U.asTypeOf(s1_req_vaddr),     s1_real_fire)
  val s2_isSoftPrefetch = RegEnable(s1_isSoftPrefetch, 0.U.asTypeOf(s1_isSoftPrefetch), s1_real_fire)
  val s2_doubleline   = RegEnable(s1_doubleline,    0.U.asTypeOf(s1_doubleline),    s1_real_fire)
  val s2_req_paddr    = RegEnable(s1_req_paddr,     0.U.asTypeOf(s1_req_paddr),     s1_real_fire)
  val s2_exception    = RegEnable(s1_exception_out, 0.U.asTypeOf(s1_exception_out), s1_real_fire)  // includes itlb/pmp exception
//  val s2_exception_in = RegEnable(s1_exception_out, 0.U.asTypeOf(s1_exception_out), s1_real_fire)  // disabled for timing consideration
  val s2_mmio         = RegEnable(s1_mmio,          0.U.asTypeOf(s1_mmio),          s1_real_fire)
  val s2_waymasks     = RegEnable(s1_waymasks,      0.U.asTypeOf(s1_waymasks),      s1_real_fire)
//  val s2_meta_codes   = RegEnable(s1_meta_codes,    0.U.asTypeOf(s1_meta_codes),    s1_real_fire)  // disabled for timing consideration

  val s2_req_vSetIdx  = s2_req_vaddr.map(get_idx)
  val s2_req_ptags    = s2_req_paddr.map(get_phy_tag)

  // disabled for timing consideration
//  // do metaArray ECC check
//  val s2_meta_corrupt = VecInit((s2_req_ptags zip s2_meta_codes zip s2_waymasks).map{ case ((meta, code), waymask) =>
//    val hit_num = PopCount(waymask)
//    // NOTE: if not hit, encodeMetaECC(meta) =/= code can also be true, but we don't care about it
//    (encodeMetaECC(meta) =/= code && hit_num === 1.U) ||  // hit one way, but parity code does not match, ECC failure
//      hit_num > 1.U                                       // hit multi way, must be an ECC failure
//  })
//
//  // generate exception
//  val s2_meta_exception = VecInit(s2_meta_corrupt.map(ExceptionType.fromECC(io.csr_parity_enable, _)))
//
//  // merge meta exception and itlb/pmp exception
//  val s2_exception = ExceptionType.merge(s2_exception_in, s2_meta_exception)

  /**
    ******************************************************************************
    * Monitor the requests from missUnit to write to SRAM
    ******************************************************************************
    */

  /* NOTE: If fromMSHR.bits.corrupt, we should set s2_MSHR_hits to false.B, and send prefetch requests again.
   * This is the opposite of how mainPipe handles fromMSHR.bits.corrupt,
   *   in which we should set s2_MSHR_hits to true.B, and send error to ifu.
   */
  val s2_MSHR_match = VecInit((0 until PortNumber).map(i =>
    (s2_req_vSetIdx(i) === fromMSHR.bits.vSetIdx) &&
    (s2_req_ptags(i) === getPhyTagFromBlk(fromMSHR.bits.blkPaddr)) &&
    s2_valid && fromMSHR.valid && !fromMSHR.bits.corrupt
  ))
  val s2_MSHR_hits = (0 until PortNumber).map(i => ValidHoldBypass(s2_MSHR_match(i), s2_fire || s2_flush))

  val s2_SRAM_hits = s2_waymasks.map(_.orR)
  val s2_hits = VecInit((0 until PortNumber).map(i => s2_MSHR_hits(i) || s2_SRAM_hits(i)))
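  // A port counts as a hit if either the line was hit in the meta SRAM (waymask captured in s1)
  // or a matching, non-corrupt refill from the missUnit was observed while this request occupied
  // s2 (ValidHoldBypass keeps the match sticky until s2 fires or is flushed).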

  /* s2_exception includes itlb pf/gpf/af, pmp af and meta corruption (af), none of which should be prefetched
   * mmio should not be prefetched
   * also, if a previous port has an exception, the latter port should not be prefetched either
   */
  val s2_miss = VecInit((0 until PortNumber).map { i =>
    !s2_hits(i) && (if (i==0) true.B else s2_doubleline) &&
      s2_exception.take(i+1).map(_ === ExceptionType.none).reduce(_&&_) &&
      s2_mmio.take(i+1).map(!_).reduce(_&&_)
  })

  /**
    ******************************************************************************
    * send req to missUnit
    ******************************************************************************
    */
  val toMSHRArbiter = Module(new Arbiter(new ICacheMissReq, PortNumber))

  // To avoid sending duplicate requests.
  val has_send = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
  (0 until PortNumber).foreach{ i =>
    when(s1_real_fire) {
      has_send(i) := false.B
    }.elsewhen(toMSHRArbiter.io.in(i).fire) {
      has_send(i) := true.B
    }
  }
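  // has_send(i) is set once port i's miss request is accepted by the arbiter and stays set until
  // the next request enters s2 (s1_real_fire), so a request that waits several cycles in s2 does
  // not issue the same miss to the missUnit twice.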

  (0 until PortNumber).map{ i =>
    toMSHRArbiter.io.in(i).valid          := s2_valid && s2_miss(i) && !has_send(i)
    toMSHRArbiter.io.in(i).bits.blkPaddr  := getBlkAddr(s2_req_paddr(i))
    toMSHRArbiter.io.in(i).bits.vSetIdx   := s2_req_vSetIdx(i)
  }

  toMSHR <> toMSHRArbiter.io.out

  s2_flush := io.flush

  // toMSHRArbiter.io.in(i).fire is not used here for timing consideration
  // val s2_finish  = (0 until PortNumber).map(i => has_send(i) || !s2_miss(i) || toMSHRArbiter.io.in(i).fire).reduce(_&&_)
  val s2_finish  = (0 until PortNumber).map(i => has_send(i) || !s2_miss(i)).reduce(_&&_)
  s2_ready      := s2_finish || !s2_valid
  s2_fire       := s2_valid && s2_finish && !s2_flush

  /** PerfAccumulate */
  // the number of bpu flush
  XSPerfAccumulate("bpu_s0_flush", from_bpu_s0_flush)
  XSPerfAccumulate("bpu_s1_flush", from_bpu_s1_flush)
  // the number of prefetch requests received from ftq or backend (software prefetch)
//  XSPerfAccumulate("prefetch_req_receive", io.req.fire)
  XSPerfAccumulate("prefetch_req_receive_hw", io.req.fire && !io.req.bits.isSoftPrefetch)
  XSPerfAccumulate("prefetch_req_receive_sw", io.req.fire && io.req.bits.isSoftPrefetch)
  // the number of prefetch requests sent to missUnit
//  XSPerfAccumulate("prefetch_req_send", toMSHR.fire)
  XSPerfAccumulate("prefetch_req_send_hw", toMSHR.fire && !s2_isSoftPrefetch)
  XSPerfAccumulate("prefetch_req_send_sw", toMSHR.fire && s2_isSoftPrefetch)
  XSPerfAccumulate("to_missUnit_stall", toMSHR.valid && !toMSHR.ready)
  /**
    * Count the number of requests that are filtered out for various reasons.
    * The discard counts in the performance counters may be a little larger than
    * the number of requests actually discarded, because a cancelled request can
    * have multiple discard reasons at the same time.
    */
  // discard prefetch request by flush
  // XSPerfAccumulate("fdip_prefetch_discard_by_tlb_except",  p1_discard && p1_tlb_except)
  // // discard prefetch request by hit icache SRAM
  // XSPerfAccumulate("fdip_prefetch_discard_by_hit_cache",   p2_discard && p1_meta_hit)
  // // discard prefetch request by hit write SRAM
  // XSPerfAccumulate("fdip_prefetch_discard_by_p1_monoitor", p1_discard && p1_monitor_hit)
  // // discard prefetch request by pmp except or mmio
  // XSPerfAccumulate("fdip_prefetch_discard_by_pmp",         p2_discard && p2_pmp_except)
  // // discard prefetch request by hit mainPipe info
  // // XSPerfAccumulate("fdip_prefetch_discard_by_mainPipe",    p2_discard && p2_mainPipe_hit)
}