xref: /XiangShan/src/main/scala/xiangshan/frontend/icache/ICacheMainPipe.scala (revision 92b88f30156d46e844042eea94f7121557fd09a1)
1/***************************************************************************************
2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
3* Copyright (c) 2020-2021 Peng Cheng Laboratory
4*
5* XiangShan is licensed under Mulan PSL v2.
6* You can use this software according to the terms and conditions of the Mulan PSL v2.
7* You may obtain a copy of Mulan PSL v2 at:
8*          http://license.coscl.org.cn/MulanPSL2
9*
10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
13*
14* See the Mulan PSL v2 for more details.
15***************************************************************************************/
16
17package xiangshan.frontend.icache
18
19import chipsalliance.rocketchip.config.Parameters
20import chisel3._
21import chisel3.util._
22import difftest.DifftestRefillEvent
23import freechips.rocketchip.tilelink.ClientStates
24import xiangshan._
25import xiangshan.cache.mmu._
26import utils._
27import utility._
28import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}
29import xiangshan.frontend.{FtqICacheInfo, FtqToICacheRequestBundle}
30
31class ICacheMainPipeReq(implicit p: Parameters) extends ICacheBundle
32{
33  val vaddr  = UInt(VAddrBits.W)
34  def vsetIdx = get_idx(vaddr)
35}
36
37class ICacheMainPipeResp(implicit p: Parameters) extends ICacheBundle
38{
39  val vaddr    = UInt(VAddrBits.W)
40  val registerData = UInt(blockBits.W)
41  val sramData = UInt(blockBits.W)
42  val select   = Bool()
43  val paddr    = UInt(PAddrBits.W)
44  val tlbExcp  = new Bundle{
45    val pageFault = Bool()
46    val accessFault = Bool()
47    val mmio = Bool()
48  }
49}
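
// A minimal sketch (an assumption about the consumer, not part of this file) of how the
// receiver of this response would pick the fetched cacheline:
//   val cacheline = Mux(resp.select, resp.sramData, resp.registerData)
// select is driven in stage 2 below: high for SRAM/prefetch hits, low when the data comes
// from the miss slots.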
50
51class ICacheMainPipeBundle(implicit p: Parameters) extends ICacheBundle
52{
53  val req  = Flipped(Decoupled(new FtqToICacheRequestBundle))
54  val resp = Vec(PortNumber, ValidIO(new ICacheMainPipeResp))
55}
56
57class ICacheMetaReqBundle(implicit p: Parameters) extends ICacheBundle{
58  val toIMeta       = DecoupledIO(new ICacheReadBundle)
59  val fromIMeta     = Input(new ICacheMetaRespBundle)
60}
61
62class ICacheDataReqBundle(implicit p: Parameters) extends ICacheBundle{
63  val toIData       = DecoupledIO(Vec(partWayNum, new ICacheReadBundle))
64  val fromIData     = Input(new ICacheDataRespBundle)
65}
66
67class ICacheMSHRBundle(implicit p: Parameters) extends ICacheBundle{
68  val toMSHR        = Decoupled(new ICacheMissReq)
69  val fromMSHR      = Flipped(ValidIO(new ICacheMissResp))
70}
71
72class ICachePMPBundle(implicit p: Parameters) extends ICacheBundle{
73  val req  = Valid(new PMPReqBundle())
74  val resp = Input(new PMPRespBundle())
75}
76
77class ICachePerfInfo(implicit p: Parameters) extends ICacheBundle{
78  val only_0_hit     = Bool()
79  val only_0_miss    = Bool()
80  val hit_0_hit_1    = Bool()
81  val hit_0_miss_1   = Bool()
82  val miss_0_hit_1   = Bool()
83  val miss_0_miss_1  = Bool()
84  val hit_0_except_1 = Bool()
85  val miss_0_except_1 = Bool()
86  val except_0       = Bool()
87  val bank_hit       = Vec(2,Bool())
88  val hit            = Bool()
89}
90
91class ICacheMainPipeInterface(implicit p: Parameters) extends ICacheBundle {
92  val hartId = Input(UInt(8.W))
93  /*** internal interface ***/
94  val metaArray   = new ICacheMetaReqBundle
95  val dataArray   = new ICacheDataReqBundle
96  /** prefetch io */
97  val iprefetchBuf = Flipped(new IPFBufferRead)
98  val PIQ          = Flipped(Vec(nPrefetchEntries,new PIQToMainPipe))
99  val IPFBufMove   = Flipped(new IPFBufferMove)
100  val mainPipeMissInfo = new MainPipeMissInfo()
101  val missSlotInfo = Vec(PortNumber, ValidIO(new MainPipeToPrefetchPipe))
102
103  val mshr        = Vec(PortNumber, new ICacheMSHRBundle)
104  val errors      = Output(Vec(PortNumber, new L1CacheErrorInfo))
105  /*** outside interface ***/
106  //val fetch       = Vec(PortNumber, new ICacheMainPipeBundle)
107  /* if ftq.valid is high in cycle T + 1,
108   * the ftq request payload must already be valid in cycle T
109   */
110  val fetch       = new ICacheMainPipeBundle
111  val pmp         = Vec(PortNumber, new ICachePMPBundle)
112  val itlb        = Vec(PortNumber, new TlbRequestIO)
113  val respStall   = Input(Bool())
114  val perfInfo = Output(new ICachePerfInfo)
115
116  val prefetchEnable = Output(Bool())
117  val prefetchDisable = Output(Bool())
118  val csr_parity_enable = Input(Bool())
119
120}
121
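/* ICacheMainPipe is a three-stage pipeline:
 *   s0: send requests to the ITLB and to the meta/data SRAMs
 *   s1: collect the ITLB/SRAM responses, compare tags, and probe the prefetch buffer (IPF) and the PIQ
 *   s2: check PMP, handle misses through the MSHRs, and respond to the IFU
 */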
122class ICacheMainPipe(implicit p: Parameters) extends ICacheModule
123{
124  val io = IO(new ICacheMainPipeInterface)
125
126  /** Input/Output port */
127  val (fromFtq, toIFU)    = (io.fetch.req, io.fetch.resp)
128  val (toMeta, metaResp)  = (io.metaArray.toIMeta, io.metaArray.fromIMeta)
129  val (toData, dataResp)  = (io.dataArray.toIData,  io.dataArray.fromIData)
130  val (toIPF,  fromIPF)   = (io.iprefetchBuf.req,   io.iprefetchBuf.resp)
131  val (toMSHR, fromMSHR)  = (io.mshr.map(_.toMSHR), io.mshr.map(_.fromMSHR))
132  val (toITLB, fromITLB)  = (io.itlb.map(_.req), io.itlb.map(_.resp))
133  val (toPMP,  fromPMP)   = (io.pmp.map(_.req), io.pmp.map(_.resp))
134  val fromPIQ             = io.PIQ.map(_.info)
135  val IPFBufferMove       = io.IPFBufMove
136  val missSlotInfo        = io.missSlotInfo
137  val mainPipeMissInfo    = io.mainPipeMissInfo
138
139  io.itlb.foreach(_.req_kill := false.B)
140
141
142  // FTQ request payload (pcMemRead), registered in the FTQ one cycle before fromFtq.valid
143  val fromFtqReq = fromFtq.bits.pcMemRead
144
145  /** pipeline control signal */
146  val s1_ready, s2_ready = Wire(Bool())
147  val s0_fire,  s1_fire , s2_fire  = Wire(Bool())
148
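  // missSwitchBit blocks new s0 requests (meta/data reads and pipeline entry) while the main
  // pipe is handling a miss: it is set when a miss request is sent to the MSHR and cleared
  // once s2 finishes fetching (see the s2 miss logic below).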
149  val missSwitchBit = RegInit(false.B)
150
151  /** replacement status register */
152  val touch_sets = Seq.fill(2)(Wire(Vec(2, UInt(log2Ceil(nSets/2).W))))
153  val touch_ways = Seq.fill(2)(Wire(Vec(2, Valid(UInt(log2Ceil(nWays).W)))) )
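  // touch_sets/touch_ways are filled in s2 (index 0: the hit way, index 1: the victim way)
  // and fed to the replacers via r.access(s, w) to update the replacement state.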
154
155  /**
156    ******************************************************************************
157    * ICache Stage 0
158    * - send req to ITLB and wait for the tlb to resolve a possible miss
159    * - send req to Meta/Data SRAM
160    ******************************************************************************
161    */
162
163  /** s0 control */
164  val s0_valid       = fromFtq.valid
165  val s0_req_vaddr   = (0 until partWayNum + 1).map(i => VecInit(Seq(fromFtqReq(i).startAddr, fromFtqReq(i).nextlineStart)))
166  val s0_req_vsetIdx = (0 until partWayNum + 1).map(i => VecInit(s0_req_vaddr(i).map(get_idx(_))))
167  val s0_only_first  = (0 until partWayNum + 1).map(i => fromFtq.bits.readValid(i) && !fromFtqReq(i).crossCacheline)
168  val s0_double_line = (0 until partWayNum + 1).map(i => fromFtq.bits.readValid(i) &&  fromFtqReq(i).crossCacheline)
169
170  val s0_final_valid        = s0_valid
171  val s0_final_vaddr        = s0_req_vaddr.head
172  val s0_final_vsetIdx      = s0_req_vsetIdx.head
173  val s0_final_only_first   = s0_only_first.head
174  val s0_final_double_line  = s0_double_line.head
175
176  /** SRAM request */
177  // 0 -> meta read; 1,2,3 -> data; 3 -> code; 4 -> itlb
178  // TODO: it seems like 0,1,2,3 -> dataArray(data); 3 -> dataArray(code); 0 -> metaArray; 4 -> itlb
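  // The FTQ sends partWayNum + 1 copies of the request, presumably to reduce fan-out:
  // the first partWayNum copies (.init) drive the data array banks, the first copy (.head)
  // drives the meta array, and the last copy (.last) drives the ITLB.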
179  val ftq_req_to_data_doubleline  = s0_double_line.init
180  val ftq_req_to_data_vset_idx    = s0_req_vsetIdx.init
181  val ftq_req_to_data_valid       = fromFtq.bits.readValid.init
182
183  val ftq_req_to_meta_doubleline  = s0_double_line.head
184  val ftq_req_to_meta_vset_idx    = s0_req_vsetIdx.head
185
186  val ftq_req_to_itlb_only_first  = s0_only_first.last
187  val ftq_req_to_itlb_doubleline  = s0_double_line.last
188  val ftq_req_to_itlb_vaddr       = s0_req_vaddr.last
189  val ftq_req_to_itlb_vset_idx    = s0_req_vsetIdx.last
190
191
192  for(i <- 0 until partWayNum) {
193    toData.valid                  := ftq_req_to_data_valid(i) && !missSwitchBit
194    toData.bits(i).isDoubleLine   := ftq_req_to_data_doubleline(i)
195    toData.bits(i).vSetIdx        := ftq_req_to_data_vset_idx(i)
196  }
197
198  toMeta.valid               := s0_valid && !missSwitchBit
199  toMeta.bits.isDoubleLine   := ftq_req_to_meta_doubleline
200  toMeta.bits.vSetIdx        := ftq_req_to_meta_vset_idx
201
202
203  toITLB(0).valid         := s0_valid
204  toITLB(0).bits.size     := 3.U // TODO: fix the size
205  toITLB(0).bits.vaddr    := ftq_req_to_itlb_vaddr(0)
206  toITLB(0).bits.debug.pc := ftq_req_to_itlb_vaddr(0)
207
208  toITLB(1).valid         := s0_valid && ftq_req_to_itlb_doubleline
209  toITLB(1).bits.size     := 3.U // TODO: fix the size
210  toITLB(1).bits.vaddr    := ftq_req_to_itlb_vaddr(1)
211  toITLB(1).bits.debug.pc := ftq_req_to_itlb_vaddr(1)
212
213  toITLB.map{port =>
214    port.bits.cmd                 := TlbCmd.exec
215    port.bits.memidx              := DontCare
216    port.bits.debug.robIdx        := DontCare
217    port.bits.no_translate        := false.B
218    port.bits.debug.isFirstIssue  := DontCare
219  }
220
221  /** ITLB & ICache synchronization case:
222   * when the ICache is not ready but the ITLB is ready,
223   * the request still takes the ITLB port because the ITLB is non-blocking,
224   * so the ITLB may deassert ready and be wrongly blocked.
225   * Solution: give the ITLB a signal (the kill bit below) telling it whether the slot is actually taken.
226   */
227
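  // s0 handshake: the request fires only when the ITLB ports, the meta/data SRAMs and stage 1
  // can all accept it in the same cycle; otherwise the ITLB request is killed via bits.kill.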
228  val itlb_can_go    = toITLB(0).ready && toITLB(1).ready
229  val icache_can_go  = toData.ready && toMeta.ready
230  val pipe_can_go    = !missSwitchBit && s1_ready
231  val s0_can_go      = itlb_can_go && icache_can_go && pipe_can_go
232  val s0_fetch_fire  = s0_valid && s0_can_go
233  s0_fire        := s0_fetch_fire
234  toITLB.map{port => port.bits.kill := !icache_can_go || !pipe_can_go}
235
236  //TODO: fix GTimer() condition
237  fromFtq.ready := s0_can_go
238
239  /**
240    ******************************************************************************
241    * ICache Stage 1
242    * - get tlb resp data (exception info and physical addresses)
243    * - get Meta/Data SRAM read responses (latched for pipeline stop)
244    * - tag compare/hit check
245    ******************************************************************************
246    */
247
248  /** s1 control */
249
250  val s1_valid = generatePipeControl(lastFire = s0_fire, thisFire = s1_fire, thisFlush = false.B, lastFlush = false.B)
251
252  val s1_req_vaddr   = RegEnable(s0_final_vaddr, s0_fire)
253  val s1_req_vsetIdx = RegEnable(s0_final_vsetIdx, s0_fire)
254  val s1_only_first  = RegEnable(s0_final_only_first, s0_fire)
255  val s1_double_line = RegEnable(s0_final_double_line, s0_fire)
256  val s1_wait        = Wire(Bool())
257
258  /** tlb response latch for pipeline stop */
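  // Since the ITLB reply may arrive while s1 is stalled, each port tracks whether a reply is
  // still expected (tlb_need_back) and whether it has already arrived (tlb_already_recv), and
  // holds the returned paddr/exception bits until s1 fires.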
259  val tlb_back = fromITLB.map(_.fire())
260  val tlb_need_back = VecInit((0 until PortNumber).map(i => ValidHold(s0_fire && toITLB(i).fire(), s1_fire, false.B)))
261  val tlb_already_recv = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
262  val tlb_ready_recv = VecInit((0 until PortNumber).map(i => RegNext(s0_fire, false.B) || (s1_valid && !tlb_already_recv(i))))
263  val tlb_resp_valid = Wire(Vec(2, Bool()))
264  for (i <- 0 until PortNumber) {
265    tlb_resp_valid(i) := tlb_already_recv(i) || (tlb_ready_recv(i) && tlb_back(i))
266    when (tlb_already_recv(i) && s1_fire) {
267      tlb_already_recv(i) := false.B
268    }
269    when (tlb_back(i) && tlb_ready_recv(i) && !s1_fire) {
270      tlb_already_recv(i) := true.B
271    }
272    fromITLB(i).ready := tlb_ready_recv(i)
273  }
274  assert(RegNext(Cat((0 until PortNumber).map(i => tlb_need_back(i) || !tlb_resp_valid(i))).andR(), true.B),
275    "when tlb should not back, tlb should not resp valid")
276  assert(RegNext(!s1_valid || Cat(tlb_need_back).orR, true.B), "when s1_valid, need at least one tlb_need_back")
277  assert(RegNext(s1_valid || !Cat(tlb_need_back).orR, true.B), "when !s1_valid, all the tlb_need_back should be false")
278  assert(RegNext(s1_valid || !Cat(tlb_already_recv).orR, true.B), "when !s1_valid, should not tlb_already_recv")
279  assert(RegNext(s1_valid || !Cat(tlb_resp_valid).orR, true.B), "when !s1_valid, should not tlb_resp_valid")
280
281  val tlbRespPAddr = VecInit((0 until PortNumber).map(i => ResultHoldBypass(valid = tlb_back(i), data = fromITLB(i).bits.paddr(0))))
282  val tlbExcpPF = VecInit((0 until PortNumber).map(i => ResultHoldBypass(valid = tlb_back(i), data = fromITLB(i).bits.excp(0).pf.instr) && tlb_need_back(i)))
283  val tlbExcpAF = VecInit((0 until PortNumber).map(i => ResultHoldBypass(valid = tlb_back(i), data = fromITLB(i).bits.excp(0).af.instr) && tlb_need_back(i)))
284  val tlbExcp = VecInit((0 until PortNumber).map(i => tlbExcpPF(i) || tlbExcpAF(i)))
285
286  val tlbRespAllValid = Cat((0 until PortNumber).map(i => !tlb_need_back(i) || tlb_resp_valid(i))).andR
287  s1_ready := s2_ready && tlbRespAllValid && !s1_wait  || !s1_valid
288  s1_fire  := s1_valid && tlbRespAllValid && s2_ready && !s1_wait
289
290  /** s1 hit check/tag compare */
291  val s1_req_paddr              = tlbRespPAddr
292  val s1_req_ptags              = VecInit(s1_req_paddr.map(get_phy_tag(_)))
293
294  val s1_meta_ptags              = ResultHoldBypass(data = metaResp.tags, valid = RegNext(s0_fire))
295  val s1_meta_valids             = ResultHoldBypass(data = metaResp.entryValid, valid = RegNext(s0_fire))
296  val s1_meta_errors             = ResultHoldBypass(data = metaResp.errors, valid = RegNext(s0_fire))
297
298  val s1_data_cacheline          = ResultHoldBypass(data = dataResp.datas, valid = RegNext(s0_fire))
299  val s1_data_errorBits          = ResultHoldBypass(data = dataResp.codes, valid = RegNext(s0_fire))
300
301  val s1_tag_eq_vec        = VecInit((0 until PortNumber).map( p => VecInit((0 until nWays).map( w =>  s1_meta_ptags(p)(w) ===  s1_req_ptags(p) ))))
302  val s1_tag_match_vec     = VecInit((0 until PortNumber).map( k => VecInit(s1_tag_eq_vec(k).zipWithIndex.map{ case(way_tag_eq, w) => way_tag_eq && s1_meta_valids(k)(w) /*s1_meta_cohs(k)(w).isValid()*/})))
303  val s1_tag_match         = VecInit(s1_tag_match_vec.map(vector => ParallelOR(vector)))
304
305  val s1_port_hit          = VecInit(Seq(s1_tag_match(0) && s1_valid  && !tlbExcp(0),  s1_tag_match(1) && s1_valid && s1_double_line && !tlbExcp(1) ))
306  val s1_bank_miss         = VecInit(Seq(!s1_tag_match(0) && s1_valid && !tlbExcp(0), !s1_tag_match(1) && s1_valid && s1_double_line && !tlbExcp(1) ))
307  val s1_hit               = (s1_port_hit(0) && s1_port_hit(1)) || (!s1_double_line && s1_port_hit(0))
308
309  /** choose victim cacheline */
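  // Each port has its own replacer covering nSets/2 sets, indexed by vsetIdx(highestIdxBit, 1);
  // the selected victim way is held (ResultHoldBypass) until s1 fires.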
310  val replacers       = Seq.fill(PortNumber)(ReplacementPolicy.fromString(cacheParams.replacer,nWays,nSets/PortNumber))
311  val s1_victim_oh    = ResultHoldBypass(data = VecInit(replacers.zipWithIndex.map{case (replacer, i) => UIntToOH(replacer.way(s1_req_vsetIdx(i)(highestIdxBit, 1)))}), valid = RegNext(s0_fire))
312
313
314  when(s1_fire){
315//    when (!(PopCount(s1_tag_match_vec(0)) <= 1.U && (PopCount(s1_tag_match_vec(1)) <= 1.U || !s1_double_line))) {
316//      printf("Multiple hit in main pipe\n")
317//    }
318    assert(PopCount(s1_tag_match_vec(0)) <= 1.U && (PopCount(s1_tag_match_vec(1)) <= 1.U || !s1_double_line),
319      "Multiple hit in main pipe, port0:is=%d,ptag=0x%x,vidx=0x%x,vaddr=0x%x port1:is=%d,ptag=0x%x,vidx=0x%x,vaddr=0x%x ",
320      PopCount(s1_tag_match_vec(0)) > 1.U,s1_req_ptags(0), get_idx(s1_req_vaddr(0)), s1_req_vaddr(0),
321      PopCount(s1_tag_match_vec(1)) > 1.U && s1_double_line, s1_req_ptags(1), get_idx(s1_req_vaddr(1)), s1_req_vaddr(1))
322  }
323
324  ((replacers zip touch_sets) zip touch_ways).map{case ((r, s),w) => r.access(s,w)}
325
326  IPFBufferMove.waymask := UIntToOH(replacers(0).way(IPFBufferMove.vsetIdx))
327  /** check ipf */
328  toIPF(0).valid := s1_valid && tlb_resp_valid(0)
329  toIPF(1).valid := s1_valid && s1_double_line && tlb_resp_valid(1)
330  (0 until PortNumber).foreach { i =>
331    toIPF(i).bits.vaddr := s1_req_vaddr(i)
332    toIPF(i).bits.paddr := s1_req_paddr(i)
333  }
334  val s1_ipf_hit = VecInit((0 until PortNumber).map(i => toIPF(i).valid && fromIPF(i).valid && fromIPF(i).bits.ipf_hit)) // check in same cycle
335  val s1_ipf_hit_latch = VecInit((0 until PortNumber).map(i => holdReleaseLatch(valid = s1_ipf_hit(i), release = s1_fire, flush = false.B))) // when the ipf returns hit data, latch it!
336  val s1_ipf_data = VecInit((0 until PortNumber).map(i => ResultHoldBypass(data = fromIPF(i).bits.cacheline, valid = s1_ipf_hit(i))))
337
338  /** check the PIQ; if it hits, wait until the prefetched data is written back */
339  //TODO: move this to PIQ
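  // If a request hits an in-flight PIQ entry whose data has not been written back yet, s1 stalls
  // (s1_wait) until the entry writes back; once written back, the data is held in PIQ_hold_res /
  // s1_PIQ_data until s1 fires.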
340  val PIQ_hold_res = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
341  fromPIQ.foreach(_.ready := true.B)
342  val PIQ_hit_oh = VecInit((0 until PortNumber).map(i =>
343    VecInit(fromPIQ.map(entry => entry.valid &&
344      entry.bits.vSetIdx === s1_req_vsetIdx(i) &&
345      entry.bits.ptage === s1_req_ptags(i))))) // TODO: when piq1 has data but piq0 does not, yet both hit, we still need to stall
346  (0 until PortNumber).foreach(i => assert(PopCount(PIQ_hit_oh(i)) <= 1.U, "multiple hit in PIQ\n"))
347  val PIQ_hit         = VecInit(Seq(PIQ_hit_oh(0).reduce(_||_) && s1_valid && tlbRespAllValid, PIQ_hit_oh(1).reduce(_||_) && s1_valid && s1_double_line && tlbRespAllValid)) // TODO: Handle TLB blocking in the PIQ
348  val PIQ_hit_data    = VecInit((0 until PortNumber).map(i => Mux1H(PIQ_hit_oh(i), fromPIQ.map(_.bits.cacheline))))
349  val PIQ_data_valid  = VecInit((0 until PortNumber).map(i => Mux1H(PIQ_hit_oh(i), fromPIQ.map(_.bits.writeBack))))
350  val s1_wait_vec     = VecInit((0 until PortNumber).map(i => !s1_port_hit(i) && !s1_ipf_hit_latch(i) && PIQ_hit(i) && !PIQ_data_valid(i) && !PIQ_hold_res(i)))
351  val PIQ_write_back  = VecInit((0 until PortNumber).map(i => !s1_port_hit(i) && !s1_ipf_hit_latch(i) && PIQ_hit(i) && PIQ_data_valid(i)))
352  val s1_PIQ_hit      = VecInit((0 until PortNumber).map(i => PIQ_write_back(i) || PIQ_hold_res(i)))
353  s1_wait := s1_valid && ((s1_wait_vec(0) && !tlbExcp(0)) || (s1_double_line && s1_wait_vec(1) && !tlbExcp(0) && !tlbExcp(1)))
354
355  (0 until PortNumber).foreach(i =>
356    when(s1_fire){
357      PIQ_hold_res(i) := false.B
358    }.elsewhen(PIQ_write_back(i)){
359      PIQ_hold_res(i) := true.B
360    }
361  )
362
363  val s1_PIQ_data = VecInit((0 until PortNumber).map(
364    i =>
365      ResultHoldBypass(data = PIQ_hit_data(i), valid = PIQ_write_back(i))
366  ))
367
368  val s1_prefetch_hit = VecInit((0 until PortNumber).map(i => s1_ipf_hit_latch(i) || s1_PIQ_hit(i)))
369  val s1_prefetch_hit_data = VecInit((0 until PortNumber).map(i => Mux(s1_ipf_hit_latch(i),s1_ipf_data(i), s1_PIQ_data(i))))
370
371  if (env.EnableDifftest) {
372    (0 until PortNumber).foreach { i =>
373      val diffPIQ = Module(new DifftestRefillEvent)
374      diffPIQ.io.clock := clock
375      diffPIQ.io.coreid := io.hartId
376      diffPIQ.io.cacheid := (i + 7).U
377      if (i == 0) diffPIQ.io.valid := s1_fire && !s1_port_hit(i) && !s1_ipf_hit_latch(i) && s1_PIQ_hit(i) && !tlbExcp(0)
378      else diffPIQ.io.valid := s1_fire && !s1_port_hit(i) && !s1_ipf_hit_latch(i) && s1_PIQ_hit(i) && s1_double_line && !tlbExcp(0) && !tlbExcp(1)
379      diffPIQ.io.addr := s1_req_paddr(i)
380      diffPIQ.io.data := s1_PIQ_data(i).asTypeOf(diffPIQ.io.data)
381    }
382  }
383
384  /** when the tlb stalls, ipfBuffer stage 2 also needs to stall */
385  mainPipeMissInfo.s1_already_check_ipf := s1_valid && tlbRespAllValid // once the tlb has responded, s1 must have already checked the ipf
386
387  /** <PERF> replace victim way number */
388
389  (0 until nWays).map{ w =>
390    XSPerfAccumulate("line_0_hit_way_" + Integer.toString(w, 10),  s1_fire && s1_port_hit(0) && OHToUInt(s1_tag_match_vec(0))  === w.U)
391  }
392
393  (0 until nWays).map{ w =>
394    XSPerfAccumulate("line_0_victim_way_" + Integer.toString(w, 10),  s1_fire && !s1_port_hit(0) && OHToUInt(s1_victim_oh(0))  === w.U)
395  }
396
397  (0 until nWays).map{ w =>
398    XSPerfAccumulate("line_1_hit_way_" + Integer.toString(w, 10),  s1_fire && s1_double_line && s1_port_hit(1) && OHToUInt(s1_tag_match_vec(1))  === w.U)
399  }
400
401  (0 until nWays).map{ w =>
402    XSPerfAccumulate("line_1_victim_way_" + Integer.toString(w, 10),  s1_fire && s1_double_line && !s1_port_hit(1) && OHToUInt(s1_victim_oh(1))  === w.U)
403  }
404
405  XSPerfAccumulate("mainPipe_stage1_block_by_piq_cycles", s1_valid && s1_wait)
406
407  /**
408    ******************************************************************************
409    * ICache Stage 2
410    * - send request to MSHR if ICache miss
411    * - generate secondary miss status/data registers
412    * - response to IFU
413    ******************************************************************************
414    */
415
416  /** s2 control */
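  // s2 holds the request until every requested cacheline is available (SRAM hit, prefetch hit,
  // secondary miss, or MSHR refill) or an exception cuts it short, and only responds when the
  // IFU is not stalling (io.respStall).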
417  val s2_fetch_finish = Wire(Bool())
418
419  val s2_valid          = generatePipeControl(lastFire = s1_fire, thisFire = s2_fire, thisFlush = false.B, lastFlush = false.B)
420  val s2_miss_available = Wire(Bool())
421
422  s2_ready      := (s2_valid && s2_fetch_finish && !io.respStall) || (!s2_valid && s2_miss_available)
423  s2_fire       := s2_valid && s2_fetch_finish && !io.respStall
424
425  /** s2 data */
426  val mmio = fromPMP.map(port => port.mmio) // TODO: handle it
427
428  val (s2_req_paddr , s2_req_vaddr)   = (RegEnable(s1_req_paddr, s1_fire), RegEnable(s1_req_vaddr, s1_fire))
429  val s2_req_vsetIdx  = RegEnable(s1_req_vsetIdx, s1_fire)
430  val s2_req_ptags    = RegEnable(s1_req_ptags, s1_fire)
431  val s2_only_first   = RegEnable(s1_only_first, s1_fire)
432  val s2_double_line  = RegEnable(s1_double_line, s1_fire)
433  val s2_hit          = RegEnable(s1_hit   , s1_fire)
434  val s2_port_hit     = RegEnable(s1_port_hit, s1_fire)
435  val s2_bank_miss    = RegEnable(s1_bank_miss, s1_fire)
436  val s2_waymask      = RegEnable(s1_victim_oh, s1_fire)
437  val s2_tag_match_vec = RegEnable(s1_tag_match_vec, s1_fire)
438  val s2_prefetch_hit = RegEnable(s1_prefetch_hit, s1_fire)
439  val s2_prefetch_hit_data = RegEnable(s1_prefetch_hit_data, s1_fire)
440  val s2_prefetch_hit_in_ipf = RegEnable(s1_ipf_hit_latch, s1_fire)
441  val s2_prefetch_hit_in_piq = RegEnable(s1_PIQ_hit, s1_fire)
442
443  assert(RegNext(!s2_valid || s2_req_paddr(0)(11,0) === s2_req_vaddr(0)(11,0), true.B))
444
445  /** status implies that s2 is a secondary miss (no need to resend the miss request) */
446  val sec_meet_vec = Wire(Vec(2, Bool()))
447  val s2_fixed_hit_vec = VecInit((0 until 2).map(i => s2_port_hit(i) || s2_prefetch_hit(i) || sec_meet_vec(i)))
448  val s2_fixed_hit = (s2_valid && s2_fixed_hit_vec(0) && s2_fixed_hit_vec(1) && s2_double_line) || (s2_valid && s2_fixed_hit_vec(0) && !s2_double_line)
449
450  val s2_meta_errors    = RegEnable(s1_meta_errors,    s1_fire)
451  val s2_data_errorBits = RegEnable(s1_data_errorBits, s1_fire)
452  val s2_data_cacheline = RegEnable(s1_data_cacheline, s1_fire)
453
454  val s2_data_errors    = Wire(Vec(PortNumber,Vec(nWays, Bool())))
455
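  // Data parity check: recombine each data unit with its stored check code and decode it; any
  // per-way decode error is flagged. Port 1 is only checked for double-line requests.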
456  (0 until PortNumber).map{ i =>
457    val read_datas = s2_data_cacheline(i).asTypeOf(Vec(nWays,Vec(dataCodeUnitNum, UInt(dataCodeUnit.W))))
458    val read_codes = s2_data_errorBits(i).asTypeOf(Vec(nWays,Vec(dataCodeUnitNum, UInt(dataCodeBits.W))))
459    val data_full_wayBits = VecInit((0 until nWays).map( w =>
460                                  VecInit((0 until dataCodeUnitNum).map(u =>
461                                        Cat(read_codes(w)(u), read_datas(w)(u))))))
462    val data_error_wayBits = VecInit((0 until nWays).map( w =>
463                                  VecInit((0 until dataCodeUnitNum).map(u =>
464                                       cacheParams.dataCode.decode(data_full_wayBits(w)(u)).error ))))
465    if(i == 0){
466      (0 until nWays).map{ w =>
467        s2_data_errors(i)(w) := RegNext(RegNext(s1_fire)) && RegNext(data_error_wayBits(w)).reduce(_||_)
468      }
469    } else {
470      (0 until nWays).map{ w =>
471        s2_data_errors(i)(w) := RegNext(RegNext(s1_fire)) && RegNext(RegNext(s1_double_line)) && RegNext(data_error_wayBits(w)).reduce(_||_)
472      }
473    }
474  }
475
476  val s2_parity_meta_error  = VecInit((0 until PortNumber).map(i => s2_meta_errors(i).reduce(_||_) && io.csr_parity_enable))
477  val s2_parity_data_error  = VecInit((0 until PortNumber).map(i => s2_data_errors(i).reduce(_||_) && io.csr_parity_enable))
478  val s2_parity_error       = VecInit((0 until PortNumber).map(i => RegNext(s2_parity_meta_error(i)) || s2_parity_data_error(i)))
479
480  for(i <- 0 until PortNumber){
481    io.errors(i).valid            := RegNext(s2_parity_error(i) && RegNext(RegNext(s1_fire)))
482    io.errors(i).report_to_beu    := RegNext(s2_parity_error(i) && RegNext(RegNext(s1_fire)))
483    io.errors(i).paddr            := RegNext(RegNext(s2_req_paddr(i)))
484    io.errors(i).source           := DontCare
485    io.errors(i).source.tag       := RegNext(RegNext(s2_parity_meta_error(i)))
486    io.errors(i).source.data      := RegNext(s2_parity_data_error(i))
487    io.errors(i).source.l2        := false.B
488    io.errors(i).opType           := DontCare
489    io.errors(i).opType.fetch     := true.B
490  }
491  XSError(s2_parity_error.reduce(_||_) && RegNext(RegNext(s1_fire)), "ICache has parity error in MainPipe!")
492
493
494  /** exception and pmp logic **/
495  //PMP Result
496  val s2_tlb_need_back = VecInit((0 until PortNumber).map(i => ValidHold(tlb_need_back(i) && s1_fire, s2_fire, false.B)))
497  val pmpExcpAF = Wire(Vec(PortNumber, Bool()))
498  pmpExcpAF(0)  := fromPMP(0).instr && s2_tlb_need_back(0)
499  pmpExcpAF(1)  := fromPMP(1).instr && s2_double_line && s2_tlb_need_back(1)
500  //exception information
501  //short delay exception signal
502  val s2_except_pf        = RegEnable(tlbExcpPF, s1_fire)
503  val s2_except_tlb_af    = RegEnable(tlbExcpAF, s1_fire)
504  //long delay exception signal
505  val s2_except_pmp_af    =  DataHoldBypass(pmpExcpAF, RegNext(s1_fire))
506  // val s2_except_parity_af =  VecInit(s2_parity_error(i) && RegNext(RegNext(s1_fire))                      )
507
508  val s2_except    = VecInit((0 until 2).map{i => s2_except_pf(i) || s2_except_tlb_af(i)})
509  val s2_has_except = s2_valid && (s2_except_tlb_af.reduce(_||_) || s2_except_pf.reduce(_||_))
510  //MMIO
511  val s2_mmio      = DataHoldBypass(io.pmp(0).resp.mmio && !s2_except_tlb_af(0) && !s2_except_pmp_af(0) && !s2_except_pf(0), RegNext(s1_fire)).asBool() && s2_valid
512
513  //send physical address to PMP
514  io.pmp.zipWithIndex.map { case (p, i) =>
515    p.req.valid := s2_valid && !missSwitchBit
516    p.req.bits.addr := s2_req_paddr(i)
517    p.req.bits.size := 3.U // TODO
518    p.req.bits.cmd := TlbCmd.exec
519  }
520
521  /*** cacheline miss logic ***/
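  // wait_state FSM: wait_idle waits for a miss pattern; wait_queue_ready/wait_send_req issue the
  // MSHR request(s); wait_one_resp/wait_two_resp (with wait_0_resp/wait_1_resp for the remaining
  // reply) collect the refills; wait_finish holds until s2 fires. PMP-only access faults and MMIO
  // requests go straight to wait_finish without touching the MissUnit.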
522  val wait_idle :: wait_queue_ready :: wait_send_req  :: wait_two_resp :: wait_0_resp :: wait_1_resp :: wait_one_resp ::wait_finish :: wait_pmp_except :: Nil = Enum(9)
523  val wait_state = RegInit(wait_idle)
524
525//  val port_miss_fix  = VecInit(Seq(fromMSHR(0).fire() && !s2_port_hit(0),   fromMSHR(1).fire() && s2_double_line && !s2_port_hit(1) ))
526
527  // secondary miss record registers
528  class MissSlot(implicit p: Parameters) extends  ICacheBundle {
529    val m_vSetIdx   = UInt(idxBits.W)
530    val m_pTag      = UInt(tagBits.W)
531    val m_data      = UInt(blockBits.W)
532    val m_corrupt   = Bool()
533  }
534
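  // Per-slot miss bookkeeping: m_invalid -> m_valid (request sent to the MSHR) -> m_refilled
  // (data returned) -> m_wait_sec_miss (refill data kept for a possible secondary miss) ->
  // m_check_final / m_invalid.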
535  val missSlot    = Seq.fill(2)(RegInit(0.U.asTypeOf(new MissSlot)))
536  val m_invalid :: m_valid :: m_refilled :: m_flushed :: m_wait_sec_miss :: m_check_final ::Nil = Enum(6)
537  val missStateQueue = RegInit(VecInit(Seq.fill(2)(m_invalid)) )
538  val reservedRefillData = Wire(Vec(2, UInt(blockBits.W)))
539
540  s2_miss_available :=  VecInit(missStateQueue.map(entry => entry === m_invalid  || entry === m_wait_sec_miss)).reduce(_&&_)
541
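  // fix_sec_miss(k) means the request on port (k % 2) matches the line kept in miss slot (k / 2),
  // i.e. it is a secondary miss that can be served from the reserved refill data below.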
542  val fix_sec_miss     = Wire(Vec(4, Bool()))
543  val sec_meet_0_miss = fix_sec_miss(0) || fix_sec_miss(2)
544  val sec_meet_1_miss = fix_sec_miss(1) || fix_sec_miss(3)
545  sec_meet_vec := VecInit(Seq(sec_meet_0_miss,sec_meet_1_miss ))
546
547  /*** miss/hit pattern: <Control Signal> only raised at the first cycle of s2_valid ***/
548  val cacheline_0_hit  = (s2_port_hit(0) || s2_prefetch_hit(0) || sec_meet_0_miss)
549  val cacheline_0_miss = !s2_port_hit(0) && !s2_prefetch_hit(0) && !sec_meet_0_miss
550
551  val cacheline_1_hit  = (s2_port_hit(1) || s2_prefetch_hit(1) || sec_meet_1_miss)
552  val cacheline_1_miss = !s2_port_hit(1) && !s2_prefetch_hit(1) && !sec_meet_1_miss
553
554  val  only_0_miss      = RegNext(s1_fire) && cacheline_0_miss && !s2_double_line && !s2_has_except && !s2_mmio
555  val  only_0_hit       = RegNext(s1_fire) && cacheline_0_hit  && !s2_double_line && !s2_mmio
556  val  hit_0_hit_1      = RegNext(s1_fire) && cacheline_0_hit  && cacheline_1_hit  && s2_double_line && !s2_mmio
557  val  hit_0_miss_1     = RegNext(s1_fire) && cacheline_0_hit  && cacheline_1_miss && s2_double_line  && !s2_has_except && !s2_mmio
558  val  miss_0_hit_1     = RegNext(s1_fire) && cacheline_0_miss && cacheline_1_hit && s2_double_line  && !s2_has_except && !s2_mmio
559  val  miss_0_miss_1    = RegNext(s1_fire) && cacheline_0_miss && cacheline_1_miss && s2_double_line  && !s2_has_except && !s2_mmio
560
561  val  hit_0_except_1   = RegNext(s1_fire) && s2_double_line &&  !s2_except(0) && s2_except(1)  &&  cacheline_0_hit
562  val  miss_0_except_1  = RegNext(s1_fire) && s2_double_line &&  !s2_except(0) && s2_except(1)  &&  cacheline_0_miss
563  val  except_0         = RegNext(s1_fire) && s2_except(0)
564
565  /*** miss/hit pattern latch: <Control Signal> latch the miss/hit pattern if the pipeline stops ***/
566  val  miss_0_hit_1_latch     =   holdReleaseLatch(valid = miss_0_hit_1,    release = s2_fire,      flush = false.B)
567  val  miss_0_miss_1_latch    =   holdReleaseLatch(valid = miss_0_miss_1,   release = s2_fire,      flush = false.B)
568  val  only_0_miss_latch      =   holdReleaseLatch(valid = only_0_miss,     release = s2_fire,      flush = false.B)
569  val  hit_0_miss_1_latch     =   holdReleaseLatch(valid = hit_0_miss_1,    release = s2_fire,      flush = false.B)
570
571  val  miss_0_except_1_latch  =   holdReleaseLatch(valid = miss_0_except_1, release = s2_fire,      flush = false.B)
572  val  except_0_latch         =   holdReleaseLatch(valid = except_0,        release = s2_fire,      flush = false.B)
573  val  hit_0_except_1_latch   =   holdReleaseLatch(valid = hit_0_except_1,  release = s2_fire,      flush = false.B)
574
575  val  only_0_hit_latch       =   holdReleaseLatch(valid = only_0_hit,      release = s2_fire,      flush = false.B)
576  val  hit_0_hit_1_latch      =   holdReleaseLatch(valid = hit_0_hit_1,     release = s2_fire,      flush = false.B)
577
578
579  /*** secondary miss judgment ***/
580
581  def waitSecondComeIn(missState: UInt): Bool = (missState === m_wait_sec_miss)
582
583  def getMissSituat(slotNum : Int, missNum : Int ) :Bool =  {
584    RegNext(s1_fire) &&
585    RegNext(missSlot(slotNum).m_vSetIdx === s1_req_vsetIdx(missNum)) &&
586    RegNext(missSlot(slotNum).m_pTag  === s1_req_ptags(missNum)) &&
587    !s2_port_hit(missNum) && !s2_prefetch_hit(missNum) &&
588    waitSecondComeIn(missStateQueue(slotNum))
589  }
590
591  val miss_0_s2_0 =   getMissSituat(slotNum = 0, missNum = 0)
592  val miss_0_s2_1 =   getMissSituat(slotNum = 0, missNum = 1)
593  val miss_1_s2_0 =   getMissSituat(slotNum = 1, missNum = 0)
594  val miss_1_s2_1 =   getMissSituat(slotNum = 1, missNum = 1)
595
596  val miss_0_s2_0_latch =   holdReleaseLatch(valid = miss_0_s2_0,    release = s2_fire,      flush = false.B)
597  val miss_0_s2_1_latch =   holdReleaseLatch(valid = miss_0_s2_1,    release = s2_fire,      flush = false.B)
598  val miss_1_s2_0_latch =   holdReleaseLatch(valid = miss_1_s2_0,    release = s2_fire,      flush = false.B)
599  val miss_1_s2_1_latch =   holdReleaseLatch(valid = miss_1_s2_1,    release = s2_fire,      flush = false.B)
600
601
602  val slot_0_solve = fix_sec_miss(0) || fix_sec_miss(1)
603  val slot_1_solve = fix_sec_miss(2) || fix_sec_miss(3)
604  val slot_slove   = VecInit(Seq(slot_0_solve, slot_1_solve))
605
606  fix_sec_miss   := VecInit(Seq(miss_0_s2_0_latch, miss_0_s2_1_latch, miss_1_s2_0_latch, miss_1_s2_1_latch))
607
608  /*** reserved data for secondary miss ***/
609
610  reservedRefillData(0) := DataHoldBypass(data = missSlot(0).m_data, valid = miss_0_s2_0 || miss_0_s2_1)
611  reservedRefillData(1) := DataHoldBypass(data = missSlot(1).m_data, valid = miss_1_s2_0 || miss_1_s2_1)
612
613  /*** miss state machine ***/
614
615  // deal with pmp access faults on cachelines that do not hit
616  val only_pmp_af = Wire(Vec(2, Bool()))
617  only_pmp_af(0) := s2_except_pmp_af(0) && cacheline_0_miss && !s2_except(0) && s2_valid
618  only_pmp_af(1) := s2_except_pmp_af(1) && cacheline_1_miss && !s2_except(1) && s2_valid && s2_double_line
619
620  switch(wait_state){
621    is(wait_idle){
622      when(only_pmp_af(0) || only_pmp_af(1) || s2_mmio){
623        // should not send a req to the MissUnit when there is a PMP access exception,
624        // but to avoid using the PMP exception in control signals (like s2_fire), it is delayed by 1 cycle.
625        // NOTE: a cacheline with a PMP exception could also hit in the ICache, but the result is meaningless; just report the exception signals.
626        wait_state := wait_finish
627      }.elsewhen(miss_0_except_1_latch){
628        wait_state :=  Mux(toMSHR(0).ready, wait_queue_ready ,wait_idle )
629      }.elsewhen( only_0_miss_latch  || miss_0_hit_1_latch){
630        wait_state :=  Mux(toMSHR(0).ready, wait_queue_ready ,wait_idle )
631      }.elsewhen(hit_0_miss_1_latch){
632        wait_state :=  Mux(toMSHR(1).ready, wait_queue_ready ,wait_idle )
633      }.elsewhen( miss_0_miss_1_latch ){
634        wait_state := Mux(toMSHR(0).ready && toMSHR(1).ready, wait_queue_ready ,wait_idle)
635      }
636    }
637
638    is(wait_queue_ready){
639      wait_state := wait_send_req
640    }
641
642    is(wait_send_req) {
643      when(miss_0_except_1_latch || only_0_miss_latch || hit_0_miss_1_latch || miss_0_hit_1_latch){
644        wait_state :=  wait_one_resp
645      }.elsewhen( miss_0_miss_1_latch ){
646        wait_state := wait_two_resp
647      }
648    }
649
650    is(wait_one_resp) {
651      when( (miss_0_except_1_latch ||only_0_miss_latch || miss_0_hit_1_latch) && fromMSHR(0).fire()){
652        wait_state := wait_finish
653      }.elsewhen( hit_0_miss_1_latch && fromMSHR(1).fire()){
654        wait_state := wait_finish
655      }
656    }
657
658    is(wait_two_resp) {
659      when(fromMSHR(0).fire() && fromMSHR(1).fire()){
660        wait_state := wait_finish
661      }.elsewhen( !fromMSHR(0).fire() && fromMSHR(1).fire() ){
662        wait_state := wait_0_resp
663      }.elsewhen(fromMSHR(0).fire() && !fromMSHR(1).fire()){
664        wait_state := wait_1_resp
665      }
666    }
667
668    is(wait_0_resp) {
669      when(fromMSHR(0).fire()){
670        wait_state := wait_finish
671      }
672    }
673
674    is(wait_1_resp) {
675      when(fromMSHR(1).fire()){
676        wait_state := wait_finish
677      }
678    }
679
680    is(wait_finish) { when(s2_fire) { wait_state := wait_idle } }
681
682  }
683
684
685  /*** send request to MissUnit ***/
686
687  (0 until 2).map { i =>
688    if(i == 1) toMSHR(i).valid := (hit_0_miss_1_latch || miss_0_miss_1_latch) && wait_state === wait_queue_ready && !s2_mmio
689    else       toMSHR(i).valid := (only_0_miss_latch || miss_0_hit_1_latch || miss_0_miss_1_latch || miss_0_except_1_latch) && wait_state === wait_queue_ready && !s2_mmio
690    toMSHR(i).bits.paddr    := s2_req_paddr(i)
691    toMSHR(i).bits.vaddr    := s2_req_vaddr(i)
692    toMSHR(i).bits.waymask  := s2_waymask(i)
693
694
695    when(toMSHR(i).fire() && missStateQueue(i) === m_invalid){
696      missStateQueue(i)     := m_valid
697      missSlot(i).m_vSetIdx := s2_req_vsetIdx(i)
698      missSlot(i).m_pTag    := get_phy_tag(s2_req_paddr(i))
699    }
700
701    when(fromMSHR(i).fire() && missStateQueue(i) === m_valid ){
702      missStateQueue(i)         := m_refilled
703      missSlot(i).m_data        := fromMSHR(i).bits.data
704      missSlot(i).m_corrupt     := fromMSHR(i).bits.corrupt
705    }
706
707
708    when(s2_fire && missStateQueue(i) === m_refilled){
709      missStateQueue(i)     := m_wait_sec_miss
710    }
711
712    /*** Only the first cycle in m_wait_sec_miss checks whether a secondary miss is met ***/
713    when(missStateQueue(i) === m_wait_sec_miss){
714      /*** The secondary req has been fixed by this slot and the other also hit || the secondary req is for another cacheline and hits ***/
715      when((slot_slove(i) && s2_fire) || (!slot_slove(i) && s2_fire) ) {
716        missStateQueue(i)     := m_invalid
717      }
718      /*** The secondary req has been fixed by this slot but the other misses / f3 is not ready || the secondary req is for another cacheline and misses ***/
719      .elsewhen((slot_slove(i) && !s2_fire && s2_valid) ||  (s2_valid && !slot_slove(i) && !s2_fire) ){
720        missStateQueue(i)     := m_check_final
721      }
722    }
723
724    when(missStateQueue(i) === m_check_final && toMSHR(i).fire()){
725      missStateQueue(i)     :=  m_valid
726      missSlot(i).m_vSetIdx := s2_req_vsetIdx(i)
727      missSlot(i).m_pTag    := get_phy_tag(s2_req_paddr(i))
728    }.elsewhen(missStateQueue(i) === m_check_final) {
729      missStateQueue(i)     :=  m_invalid
730    }
731  }
732
733  io.prefetchEnable := false.B
734  io.prefetchDisable := false.B
735  when(toMSHR.map(_.valid).reduce(_||_)){
736    missSwitchBit := true.B
737    io.prefetchEnable := true.B
738  }.elsewhen(missSwitchBit && s2_fetch_finish){
739    missSwitchBit := false.B
740    io.prefetchDisable := true.B
741  }
742
743  (0 until PortNumber).foreach{
744    i =>
745      missSlotInfo(i).valid := missStateQueue(i) =/= m_invalid
746      missSlotInfo(i).bits.vSetIdx := missSlot(i).m_vSetIdx
747      missSlotInfo(i).bits.ptage := missSlot(i).m_pTag
748  }
749
750
751  val miss_all_fix       =  wait_state === wait_finish
752
753  s2_fetch_finish        := ((s2_valid && s2_fixed_hit) || miss_all_fix || hit_0_except_1_latch || except_0_latch)
754
755  /** update replacement status register: touch entry 0 records the hit access, entry 1 records the miss access */
756  (touch_ways zip touch_sets).zipWithIndex.map{ case((t_w,t_s), i) =>
757    t_s(0)         := s2_req_vsetIdx(i)(highestIdxBit, 1)
758    t_w(0).valid   := s2_valid && s2_port_hit(i)
759    t_w(0).bits    := OHToUInt(s2_tag_match_vec(i))
760
761    t_s(1)         := s2_req_vsetIdx(i)(highestIdxBit, 1)
762    t_w(1).valid   := s2_valid && !s2_port_hit(i)
763    t_w(1).bits    := OHToUInt(s2_waymask(i))
764  }
765
766  // use the hit one-hot vector to select data
767  val s2_hit_datas    = VecInit(s2_data_cacheline.zipWithIndex.map { case(bank, i) =>
768    val port_hit_data = Mux1H(s2_tag_match_vec(i).asUInt, bank)
769    port_hit_data
770  })
771
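  // registerData carries refill data from the miss slots (or the reserved secondary-miss data),
  // while sramData carries SRAM/prefetch hit data; the select bit in the IFU response chooses
  // between them.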
772  val s2_register_datas       = Wire(Vec(2, UInt(blockBits.W)))
773
774  s2_register_datas.zipWithIndex.map{case(bank,i) =>
775    // if(i == 0) bank := Mux(s2_port_hit(i), s2_hit_datas(i), Mux(miss_0_s2_0_latch,reservedRefillData(0), Mux(miss_1_s2_0_latch,reservedRefillData(1), missSlot(0).m_data)))
776    // else    bank    := Mux(s2_port_hit(i), s2_hit_datas(i), Mux(miss_0_s2_1_latch,reservedRefillData(0), Mux(miss_1_s2_1_latch,reservedRefillData(1), missSlot(1).m_data)))
777    if(i == 0) bank := Mux(miss_0_s2_0_latch,reservedRefillData(0), Mux(miss_1_s2_0_latch,reservedRefillData(1), missSlot(0).m_data))
778    else    bank    := Mux(miss_0_s2_1_latch,reservedRefillData(0), Mux(miss_1_s2_1_latch,reservedRefillData(1), missSlot(1).m_data))
779  }
780
781  /** response to IFU */
782
783  (0 until PortNumber).map{ i =>
784    if(i ==0) toIFU(i).valid          := s2_fire
785       else   toIFU(i).valid          := s2_fire && s2_double_line
786    //when select is high, use sramData. Otherwise, use registerData.
787    toIFU(i).bits.registerData  := s2_register_datas(i)
788    toIFU(i).bits.sramData  := Mux(s2_port_hit(i), s2_hit_datas(i), s2_prefetch_hit_data(i))
789    toIFU(i).bits.select    := s2_port_hit(i) || s2_prefetch_hit(i)
790    toIFU(i).bits.paddr     := s2_req_paddr(i)
791    toIFU(i).bits.vaddr     := s2_req_vaddr(i)
792    toIFU(i).bits.tlbExcp.pageFault     := s2_except_pf(i)
793    toIFU(i).bits.tlbExcp.accessFault   := s2_except_tlb_af(i) || missSlot(i).m_corrupt || s2_except_pmp_af(i)
794    toIFU(i).bits.tlbExcp.mmio          := s2_mmio
795
796    when(RegNext(s2_fire && missSlot(i).m_corrupt)){
797      io.errors(i).valid            := true.B
798      io.errors(i).report_to_beu    := false.B // l2 should have already reported that to the bus error unit, no need to do it again
799      io.errors(i).paddr            := RegNext(s2_req_paddr(i))
800      io.errors(i).source.tag       := false.B
801      io.errors(i).source.data      := false.B
802      io.errors(i).source.l2        := true.B
803    }
804  }
805  (0 until 2).map {i =>
806    XSPerfAccumulate("port_" + i + "_only_hit_in_ipf", !s2_port_hit(i) && s2_prefetch_hit(i) && s2_fire)
807  }
808
809  /** s2 mainPipe miss info */
810  mainPipeMissInfo.s2_miss_info(0).valid := s2_valid && (miss_0_hit_1_latch || miss_0_miss_1_latch || only_0_miss_latch || miss_0_except_1_latch) && !except_0_latch
811  mainPipeMissInfo.s2_miss_info(1).valid := s2_valid && (miss_0_miss_1_latch || hit_0_miss_1_latch)
812  (0 until 2).foreach { i =>
813    mainPipeMissInfo.s2_miss_info(i).bits.vSetIdx := s2_req_vsetIdx(i)
814    mainPipeMissInfo.s2_miss_info(i).bits.ptage := s2_req_ptags(i)
815  }
816
817  io.perfInfo.only_0_hit    := only_0_hit_latch
818  io.perfInfo.only_0_miss   := only_0_miss_latch
819  io.perfInfo.hit_0_hit_1   := hit_0_hit_1_latch
820  io.perfInfo.hit_0_miss_1  := hit_0_miss_1_latch
821  io.perfInfo.miss_0_hit_1  := miss_0_hit_1_latch
822  io.perfInfo.miss_0_miss_1 := miss_0_miss_1_latch
823  io.perfInfo.hit_0_except_1 := hit_0_except_1_latch
824  io.perfInfo.miss_0_except_1 := miss_0_except_1_latch
825  io.perfInfo.except_0      := except_0_latch
826  io.perfInfo.bank_hit(0)   := only_0_miss_latch  || hit_0_hit_1_latch || hit_0_miss_1_latch || hit_0_except_1_latch
827  io.perfInfo.bank_hit(1)   := miss_0_hit_1_latch || hit_0_hit_1_latch
828  io.perfInfo.hit           := hit_0_hit_1_latch || only_0_hit_latch || hit_0_except_1_latch || except_0_latch
829
830  /** <PERF> fetch bubble generated by icache miss */
831
832  XSPerfAccumulate("icache_bubble_s2_miss",    s2_valid && !s2_fetch_finish )
833
834  val tlb_miss_vec = VecInit((0 until PortNumber).map(i => toITLB(i).valid && s0_can_go && fromITLB(i).bits.miss))
835  val tlb_has_miss = tlb_miss_vec.reduce(_ || _)
836  XSPerfAccumulate("icache_bubble_s0_tlb_miss",    s0_valid && tlb_has_miss )
837
838  if (env.EnableDifftest) {
839    val discards = (0 until PortNumber).map { i =>
840      val discard = toIFU(i).bits.tlbExcp.pageFault || toIFU(i).bits.tlbExcp.accessFault || toIFU(i).bits.tlbExcp.mmio
841      discard
842    }
843    (0 until PortNumber).map { i =>
844      val diffMainPipeOut = Module(new DifftestRefillEvent)
845      diffMainPipeOut.io.clock := clock
846      diffMainPipeOut.io.coreid := io.hartId
847      diffMainPipeOut.io.cacheid := (4 + i).U
848      if (i == 0) diffMainPipeOut.io.valid := s2_fire && !discards(0)
849      else        diffMainPipeOut.io.valid := s2_fire && s2_double_line && !discards(0) && !discards(1)
850      diffMainPipeOut.io.addr := s2_req_paddr(i)
851      when (toIFU(i).bits.select.asBool) {
852        diffMainPipeOut.io.data := toIFU(i).bits.sramData.asTypeOf(diffMainPipeOut.io.data)
853      } .otherwise {
854        diffMainPipeOut.io.data := toIFU(i).bits.registerData.asTypeOf(diffMainPipeOut.io.data)
855      }
856      // idtfr: 1 -> data from icache; 2 -> data from ipf; 3 -> data from piq; 4 -> data from missUnit
857      when (s2_port_hit(i)) { diffMainPipeOut.io.idtfr := 1.U }
858        .elsewhen(s2_prefetch_hit(i)) {
859          when (s2_prefetch_hit_in_ipf(i)) { diffMainPipeOut.io.idtfr := 2.U  }
860            .elsewhen(s2_prefetch_hit_in_piq(i)) { diffMainPipeOut.io.idtfr := 3.U }
861            .otherwise { XSWarn(true.B, "should not be in this situation\n")}
862        }
863        .otherwise { diffMainPipeOut.io.idtfr := 4.U }
864      diffMainPipeOut
865    }
866  }
867}
868