xref: /XiangShan/src/main/scala/xiangshan/frontend/icache/ICacheMainPipe.scala (revision e39d682897e0b6b34970d56398bf3999ace55955)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend.icache

import chisel3._
import chisel3.util._
import difftest._
import freechips.rocketchip.tilelink.ClientStates
import org.chipsalliance.cde.config.Parameters
import utility._
import utils._
import xiangshan._
import xiangshan.backend.fu.PMPReqBundle
import xiangshan.backend.fu.PMPRespBundle
import xiangshan.cache.mmu._
import xiangshan.frontend.ExceptionType
import xiangshan.frontend.FtqICacheInfo
import xiangshan.frontend.FtqToICacheRequestBundle

class ICacheMainPipeReq(implicit p: Parameters) extends ICacheBundle {
  val vaddr   = UInt(VAddrBits.W)
  def vSetIdx = get_idx(vaddr)
}

class ICacheMainPipeResp(implicit p: Parameters) extends ICacheBundle {
  val vaddr            = UInt(VAddrBits.W)
  val data             = UInt(blockBits.W)
  val paddr            = UInt(PAddrBits.W)
  val exception        = UInt(ExceptionType.width.W)
  val pmp_mmio         = Bool()
  val itlb_pbmt        = UInt(Pbmt.width.W)
  val backendException = Bool()
  /* NOTE: GPAddrBits(=50bit) is not enough for gpaddr here, refer to PR#3795
   * Sv48x4 only allows a 50-bit gpaddr; when software violates this requirement,
   * the mtval2 register needs to be filled with the full XLEN(=64bit) gpaddr.
   * PAddrBitsMax(=56bit currently) is required for the frontend datapath due to the itlb ppn length limitation
   * (cases 56<x<=64 are handled by the backend datapath)
   */
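  // (for reference: G-stage Sv48x4 translation widens the VPN by 2 bits, so a legal gpaddr is at most
  //  48 + 2 = 50 bits; a 44-bit PPN plus the 12-bit page offset is consistent with PAddrBitsMax = 56)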
  val gpaddr            = UInt(PAddrBitsMax.W)
  val isForVSnonLeafPTE = Bool()
}

class ICacheMainPipeBundle(implicit p: Parameters) extends ICacheBundle {
  val req               = Flipped(Decoupled(new FtqToICacheRequestBundle))
  val resp              = Vec(PortNumber, ValidIO(new ICacheMainPipeResp))
  val topdownIcacheMiss = Output(Bool())
  val topdownItlbMiss   = Output(Bool())
}

class ICacheMetaReqBundle(implicit p: Parameters) extends ICacheBundle {
  val toIMeta   = DecoupledIO(new ICacheReadBundle)
  val fromIMeta = Input(new ICacheMetaRespBundle)
}

class ICacheDataReqBundle(implicit p: Parameters) extends ICacheBundle {
  val toIData   = Vec(partWayNum, DecoupledIO(new ICacheReadBundle))
  val fromIData = Input(new ICacheDataRespBundle)
}

class ICacheMSHRBundle(implicit p: Parameters) extends ICacheBundle {
  val req  = Decoupled(new ICacheMissReq)
  val resp = Flipped(ValidIO(new ICacheMissResp))
}

class ICachePMPBundle(implicit p: Parameters) extends ICacheBundle {
  val req  = Valid(new PMPReqBundle())
  val resp = Input(new PMPRespBundle())
}

class ICachePerfInfo(implicit p: Parameters) extends ICacheBundle {
  val only_0_hit      = Bool()
  val only_0_miss     = Bool()
  val hit_0_hit_1     = Bool()
  val hit_0_miss_1    = Bool()
  val miss_0_hit_1    = Bool()
  val miss_0_miss_1   = Bool()
  val hit_0_except_1  = Bool()
  val miss_0_except_1 = Bool()
  val except_0        = Bool()
  val bank_hit        = Vec(2, Bool())
  val hit             = Bool()
}

class ICacheMainPipeInterface(implicit p: Parameters) extends ICacheBundle {
  val hartId = Input(UInt(hartIdLen.W))

  /*** internal interface ***/
  val dataArray      = new ICacheDataReqBundle
  val metaArrayFlush = Vec(PortNumber, ValidIO(new ICacheMetaFlushBundle))

  /** prefetch io */
  val touch         = Vec(PortNumber, ValidIO(new ReplacerTouch))
  val wayLookupRead = Flipped(DecoupledIO(new WayLookupInfo))

  val mshr   = new ICacheMSHRBundle
  val errors = Output(Vec(PortNumber, ValidIO(new L1CacheErrorInfo)))

  /*** outside interface ***/
  // val fetch       = Vec(PortNumber, new ICacheMainPipeBundle)
  /* if ftq.valid is high in cycle T + 1,
   * the ftq request payload must already be valid in cycle T
   */
  val fetch     = new ICacheMainPipeBundle
  val pmp       = Vec(PortNumber, new ICachePMPBundle)
  val respStall = Input(Bool())

  val csr_parity_enable = Input(Bool())
  val flush             = Input(Bool())

  val perfInfo = Output(new ICachePerfInfo)
}

class ICacheDB(implicit p: Parameters) extends ICacheBundle {
  val blk_vaddr = UInt((VAddrBits - blockOffBits).W)
  val blk_paddr = UInt((PAddrBits - blockOffBits).W)
  val hit       = Bool()
}

class ICacheMainPipe(implicit p: Parameters) extends ICacheModule {
  val io = IO(new ICacheMainPipeInterface)

  /** Input/Output port */
  val (fromFtq, toIFU)   = (io.fetch.req, io.fetch.resp)
  val (toData, fromData) = (io.dataArray.toIData, io.dataArray.fromIData)
  val toMetaFlush        = io.metaArrayFlush
  val (toMSHR, fromMSHR) = (io.mshr.req, io.mshr.resp)
  val (toPMP, fromPMP)   = (io.pmp.map(_.req), io.pmp.map(_.resp))
  val fromWayLookup      = io.wayLookupRead
  val csr_parity_enable  = if (ICacheForceMetaECCError || ICacheForceDataECCError) true.B else io.csr_parity_enable

  // Statistics on the frequency distribution of FTQ fire intervals
  val cntFtqFireInterval = RegInit(0.U(32.W))
  cntFtqFireInterval := Mux(fromFtq.fire, 1.U, cntFtqFireInterval + 1.U)
  XSPerfHistogram("ftq2icache_fire", cntFtqFireInterval, fromFtq.fire, 1, 300, 1, right_strict = true)
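  // the counter restarts at 1 on every FTQ fire and increments otherwise, so when the next fire
  // arrives it holds the number of cycles since the previous fire, i.e. the sampled interval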

  /** pipeline control signal */
  val s1_ready, s2_ready           = Wire(Bool())
  val s0_fire, s1_fire, s2_fire    = Wire(Bool())
  val s0_flush, s1_flush, s2_flush = Wire(Bool())
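  // handshake convention: sN_fire is asserted when the request in stage N advances to stage N+1,
  // sN_ready means stage N can accept a new request, and sN_flush (all driven by io.flush below)
  // kills the request currently held in that stage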

  /**
    ******************************************************************************
    * ICache Stage 0
    * - send req to data SRAM
    * - get waymask and tlb info from wayLookup
    ******************************************************************************
    */

  /** s0 control */
  // 0,1,2,3 -> dataArray(data); 4 -> mainPipe
  // Ftq RegNext Register
  val fromFtqReq       = fromFtq.bits.pcMemRead
  val s0_valid         = fromFtq.valid
  val s0_req_valid_all = (0 until partWayNum + 1).map(i => fromFtq.bits.readValid(i))
  val s0_req_vaddr_all =
    (0 until partWayNum + 1).map(i => VecInit(Seq(fromFtqReq(i).startAddr, fromFtqReq(i).nextlineStart)))
  val s0_req_vSetIdx_all = (0 until partWayNum + 1).map(i => VecInit(s0_req_vaddr_all(i).map(get_idx)))
  val s0_req_offset_all  = (0 until partWayNum + 1).map(i => s0_req_vaddr_all(i)(0)(log2Ceil(blockBytes) - 1, 0))
  val s0_doubleline_all  = (0 until partWayNum + 1).map(i => fromFtq.bits.readValid(i) && fromFtqReq(i).crossCacheline)

  val s0_req_vaddr   = s0_req_vaddr_all.last
  val s0_req_vSetIdx = s0_req_vSetIdx_all.last
  val s0_doubleline  = s0_doubleline_all.last

  val s0_backendException = fromFtq.bits.backendException

  /**
    ******************************************************************************
    * get waymask and tlb info from wayLookup
    ******************************************************************************
    */
  fromWayLookup.ready := s0_fire
  val s0_waymasks              = VecInit(fromWayLookup.bits.waymask.map(_.asTypeOf(Vec(nWays, Bool()))))
  val s0_req_ptags             = fromWayLookup.bits.ptag
  val s0_req_gpaddr            = fromWayLookup.bits.gpaddr
  val s0_req_isForVSnonLeafPTE = fromWayLookup.bits.isForVSnonLeafPTE
  val s0_itlb_exception        = fromWayLookup.bits.itlb_exception
  val s0_itlb_pbmt             = fromWayLookup.bits.itlb_pbmt
  val s0_meta_codes            = fromWayLookup.bits.meta_codes
  val s0_hits                  = VecInit(fromWayLookup.bits.waymask.map(_.orR))
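  // a non-zero waymask from wayLookup means that port is expected to hit in the SRAM;
  // s0_hits is carried down the pipeline as s1_SRAMhits / s2_SRAMhits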

  when(s0_fire) {
    assert(
      (0 until PortNumber).map(i => s0_req_vSetIdx(i) === fromWayLookup.bits.vSetIdx(i)).reduce(_ && _),
      "vSetIdxs from ftq and wayLookup are different! vaddr0=0x%x ftq: vidx0=0x%x vidx1=0x%x wayLookup: vidx0=0x%x vidx1=0x%x",
      s0_req_vaddr(0),
      s0_req_vSetIdx(0),
      s0_req_vSetIdx(1),
      fromWayLookup.bits.vSetIdx(0),
      fromWayLookup.bits.vSetIdx(1)
    )
  }

  /**
    ******************************************************************************
    * data SRAM request
    ******************************************************************************
    */
  for (i <- 0 until partWayNum) {
    toData(i).valid             := s0_req_valid_all(i)
    toData(i).bits.isDoubleLine := s0_doubleline_all(i)
    toData(i).bits.vSetIdx      := s0_req_vSetIdx_all(i)
    toData(i).bits.blkOffset    := s0_req_offset_all(i)
    toData(i).bits.wayMask      := s0_waymasks
  }

  val s0_can_go = toData.last.ready && fromWayLookup.valid && s1_ready
  s0_flush := io.flush
  s0_fire  := s0_valid && s0_can_go && !s0_flush

  fromFtq.ready := s0_can_go

  /**
    ******************************************************************************
    * ICache Stage 1
    * - PMP check
    * - get data SRAM read responses (latched in case the pipeline stalls)
    * - monitor missUnit response port
    ******************************************************************************
    */
  val s1_valid = generatePipeControl(lastFire = s0_fire, thisFire = s1_fire, thisFlush = s1_flush, lastFlush = false.B)

  val s1_req_vaddr             = RegEnable(s0_req_vaddr, 0.U.asTypeOf(s0_req_vaddr), s0_fire)
  val s1_req_ptags             = RegEnable(s0_req_ptags, 0.U.asTypeOf(s0_req_ptags), s0_fire)
  val s1_req_gpaddr            = RegEnable(s0_req_gpaddr, 0.U.asTypeOf(s0_req_gpaddr), s0_fire)
  val s1_req_isForVSnonLeafPTE = RegEnable(s0_req_isForVSnonLeafPTE, 0.U.asTypeOf(s0_req_isForVSnonLeafPTE), s0_fire)
  val s1_doubleline            = RegEnable(s0_doubleline, 0.U.asTypeOf(s0_doubleline), s0_fire)
  val s1_SRAMhits              = RegEnable(s0_hits, 0.U.asTypeOf(s0_hits), s0_fire)
  val s1_itlb_exception        = RegEnable(s0_itlb_exception, 0.U.asTypeOf(s0_itlb_exception), s0_fire)
  val s1_backendException      = RegEnable(s0_backendException, false.B, s0_fire)
  val s1_itlb_pbmt             = RegEnable(s0_itlb_pbmt, 0.U.asTypeOf(s0_itlb_pbmt), s0_fire)
  val s1_waymasks              = RegEnable(s0_waymasks, 0.U.asTypeOf(s0_waymasks), s0_fire)
  val s1_meta_codes            = RegEnable(s0_meta_codes, 0.U.asTypeOf(s0_meta_codes), s0_fire)

  val s1_req_vSetIdx = s1_req_vaddr.map(get_idx)
  val s1_req_paddr   = s1_req_vaddr.zip(s1_req_ptags).map { case (vaddr, ptag) => get_paddr_from_ptag(vaddr, ptag) }
  val s1_req_offset  = s1_req_vaddr(0)(log2Ceil(blockBytes) - 1, 0)

  // do metaArray ECC check
  val s1_meta_corrupt = VecInit((s1_req_ptags zip s1_meta_codes zip s1_waymasks).map { case ((meta, code), waymask) =>
    val hit_num = PopCount(waymask)
    // NOTE: if not hit, encodeMetaECC(meta) =/= code can also be true, but we don't care about it
    (encodeMetaECC(meta) =/= code && hit_num === 1.U) || // hit one way, but the parity code does not match: ECC failure
    hit_num > 1.U                                        // hit multiple ways: must be an ECC failure
  })
  // force clear meta_corrupt when parity check is disabled
  when(!csr_parity_enable) {
    s1_meta_corrupt := VecInit(Seq.fill(PortNumber)(false.B))
  }

  /**
    ******************************************************************************
    * update replacement status register
    ******************************************************************************
    */
  (0 until PortNumber).foreach { i =>
    io.touch(i).bits.vSetIdx := s1_req_vSetIdx(i)
    io.touch(i).bits.way     := OHToUInt(s1_waymasks(i))
  }
  io.touch(0).valid := RegNext(s0_fire) && s1_SRAMhits(0)
  io.touch(1).valid := RegNext(s0_fire) && s1_SRAMhits(1) && s1_doubleline

  /**
    ******************************************************************************
    * PMP check
    ******************************************************************************
    */
  toPMP.zipWithIndex.foreach { case (p, i) =>
    // if the itlb has an exception, paddr can be invalid, so the pmp check could be skipped
    p.valid     := s1_valid // && !ExceptionType.hasException(s1_itlb_exception(i))
    p.bits.addr := s1_req_paddr(i)
    p.bits.size := 3.U      // TODO
    p.bits.cmd  := TlbCmd.exec
  }
  val s1_pmp_exception = VecInit(fromPMP.map(ExceptionType.fromPMPResp))
  val s1_pmp_mmio      = VecInit(fromPMP.map(_.mmio))

  // merge s1 itlb/pmp exceptions, itlb has the highest priority, pmp next
  val s1_exception_out = ExceptionType.merge(
    s1_itlb_exception,
    s1_pmp_exception
  )

  /**
    ******************************************************************************
    * select data from MSHR, SRAM
    ******************************************************************************
    */
  val s1_MSHR_match = VecInit((0 until PortNumber).map(i =>
    (s1_req_vSetIdx(i) === fromMSHR.bits.vSetIdx) &&
      (s1_req_ptags(i) === getPhyTagFromBlk(fromMSHR.bits.blkPaddr)) &&
      fromMSHR.valid && !fromMSHR.bits.corrupt
  ))
  val s1_MSHR_hits  = Seq(s1_valid && s1_MSHR_match(0), s1_valid && (s1_MSHR_match(1) && s1_doubleline))
  val s1_MSHR_datas = fromMSHR.bits.data.asTypeOf(Vec(ICacheDataBanks, UInt((blockBits / ICacheDataBanks).W)))

  val s1_hits = (0 until PortNumber).map(i =>
    ValidHoldBypass(s1_MSHR_hits(i) || (RegNext(s0_fire) && s1_SRAMhits(i)), s1_fire || s1_flush)
  )

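  // the fetch window starts at s1_req_offset and may wrap into the next cacheline: banks at or above
  // the starting bank index hold port 0's line, banks below it hold port 1's (next) line.
  // e.g. (illustrative figures only) with 64B blocks split into 8 banks of 8B each, an offset of
  // 0x28 gives bankIdxLow = 5, so banks 5..7 come from line 0 and banks 0..4 from line 1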
  val s1_bankIdxLow = s1_req_offset >> log2Ceil(blockBytes / ICacheDataBanks)
  val s1_bankMSHRHit = VecInit((0 until ICacheDataBanks).map(i =>
    (i.U >= s1_bankIdxLow) && s1_MSHR_hits(0) ||
      (i.U < s1_bankIdxLow) && s1_MSHR_hits(1)
  ))
  val s1_datas = VecInit((0 until ICacheDataBanks).map(i =>
    DataHoldBypass(Mux(s1_bankMSHRHit(i), s1_MSHR_datas(i), fromData.datas(i)), s1_bankMSHRHit(i) || RegNext(s0_fire))
  ))
  val s1_data_is_from_MSHR = VecInit((0 until ICacheDataBanks).map(i =>
    DataHoldBypass(s1_bankMSHRHit(i), s1_bankMSHRHit(i) || RegNext(s0_fire))
  ))
  val s1_codes = DataHoldBypass(fromData.codes, RegNext(s0_fire))

  s1_flush := io.flush
  s1_ready := s2_ready || !s1_valid
  s1_fire  := s1_valid && s2_ready && !s1_flush

  /**
    ******************************************************************************
    * ICache Stage 2
    * - send request to MSHR if ICache miss
    * - monitor missUnit response port
    * - response to IFU
    ******************************************************************************
    */

  val s2_valid = generatePipeControl(lastFire = s1_fire, thisFire = s2_fire, thisFlush = s2_flush, lastFlush = false.B)

  val s2_req_vaddr             = RegEnable(s1_req_vaddr, 0.U.asTypeOf(s1_req_vaddr), s1_fire)
  val s2_req_ptags             = RegEnable(s1_req_ptags, 0.U.asTypeOf(s1_req_ptags), s1_fire)
  val s2_req_gpaddr            = RegEnable(s1_req_gpaddr, 0.U.asTypeOf(s1_req_gpaddr), s1_fire)
  val s2_req_isForVSnonLeafPTE = RegEnable(s1_req_isForVSnonLeafPTE, 0.U.asTypeOf(s1_req_isForVSnonLeafPTE), s1_fire)
  val s2_doubleline            = RegEnable(s1_doubleline, 0.U.asTypeOf(s1_doubleline), s1_fire)
  val s2_exception             = RegEnable(s1_exception_out, 0.U.asTypeOf(s1_exception_out), s1_fire)
  val s2_backendException      = RegEnable(s1_backendException, false.B, s1_fire)
  val s2_pmp_mmio              = RegEnable(s1_pmp_mmio, 0.U.asTypeOf(s1_pmp_mmio), s1_fire)
  val s2_itlb_pbmt             = RegEnable(s1_itlb_pbmt, 0.U.asTypeOf(s1_itlb_pbmt), s1_fire)
  val s2_waymasks              = RegEnable(s1_waymasks, 0.U.asTypeOf(s1_waymasks), s1_fire)

  val s2_req_vSetIdx = s2_req_vaddr.map(get_idx)
  val s2_req_offset  = s2_req_vaddr(0)(log2Ceil(blockBytes) - 1, 0)
  val s2_req_paddr   = s2_req_vaddr.zip(s2_req_ptags).map { case (vaddr, ptag) => get_paddr_from_ptag(vaddr, ptag) }

  val s2_SRAMhits          = RegEnable(s1_SRAMhits, 0.U.asTypeOf(s1_SRAMhits), s1_fire)
  val s2_codes             = RegEnable(s1_codes, 0.U.asTypeOf(s1_codes), s1_fire)
  val s2_hits              = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
  val s2_datas             = RegInit(VecInit(Seq.fill(ICacheDataBanks)(0.U((blockBits / ICacheDataBanks).W))))
  val s2_data_is_from_MSHR = RegInit(VecInit(Seq.fill(ICacheDataBanks)(false.B)))

  /**
    ******************************************************************************
    * ECC check
    ******************************************************************************
    */
  // check data error
  val s2_bankSel      = getBankSel(s2_req_offset, s2_valid)
  val s2_bank_corrupt = (0 until ICacheDataBanks).map(i => encodeDataECC(s2_datas(i)) =/= s2_codes(i))
  // if data is from MSHR, we don't need to check ECC
  val s2_data_corrupt = VecInit((0 until PortNumber).map(port =>
    (0 until ICacheDataBanks).map(bank =>
      s2_bank_corrupt(bank) && s2_bankSel(port)(bank).asBool && !s2_data_is_from_MSHR(bank)
    ).reduce(_ || _) && s2_SRAMhits(port)
  ))
  // force clear data_corrupt when parity check is disabled
  when(!csr_parity_enable) {
    s2_data_corrupt := VecInit(Seq.fill(PortNumber)(false.B))
  }
  // meta error is checked in s1 stage
  val s2_meta_corrupt = RegEnable(s1_meta_corrupt, 0.U.asTypeOf(s1_meta_corrupt), s1_fire)
  // send errors to top
  // TODO: support RERI spec standard interface
  (0 until PortNumber).map { i =>
    io.errors(i).valid              := (s2_meta_corrupt(i) || s2_data_corrupt(i)) && RegNext(s1_fire)
    io.errors(i).bits.report_to_beu := (s2_meta_corrupt(i) || s2_data_corrupt(i)) && RegNext(s1_fire)
    io.errors(i).bits.paddr         := s2_req_paddr(i)
    io.errors(i).bits.source        := DontCare
    io.errors(i).bits.source.tag    := s2_meta_corrupt(i)
    io.errors(i).bits.source.data   := s2_data_corrupt(i)
    io.errors(i).bits.source.l2     := false.B
    io.errors(i).bits.opType        := DontCare
    io.errors(i).bits.opType.fetch  := true.B
  }
  // flush metaArray to prepare for re-fetch
  (0 until PortNumber).foreach { i =>
    toMetaFlush(i).valid       := (s2_meta_corrupt(i) || s2_data_corrupt(i)) && RegNext(s1_fire)
    toMetaFlush(i).bits.virIdx := s2_req_vSetIdx(i)
    // if meta is corrupt, clear all ways (since the waymask may be unreliable)
    // if data is corrupt, only clear the way that has the error
    toMetaFlush(i).bits.waymask := Mux(s2_meta_corrupt(i), Fill(nWays, true.B), s2_waymasks(i).asUInt)
  }
  // PERF: count the number of data parity errors
  XSPerfAccumulate("data_corrupt_0", s2_data_corrupt(0) && RegNext(s1_fire))
  XSPerfAccumulate("data_corrupt_1", s2_data_corrupt(1) && RegNext(s1_fire))
  XSPerfAccumulate("meta_corrupt_0", s2_meta_corrupt(0) && RegNext(s1_fire))
  XSPerfAccumulate("meta_corrupt_1", s2_meta_corrupt(1) && RegNext(s1_fire))
  // TEST: stop simulation if parity error is detected, and dump wave
//  val (assert_valid, assert_val) = DelayNWithValid(s2_meta_corrupt.reduce(_ || _), s2_valid, 1000)
//  assert(!(assert_valid && assert_val))
//  val (assert_valid, assert_val) = DelayNWithValid(s2_data_corrupt.reduce(_ || _), s2_valid, 1000)
//  assert(!(assert_valid && assert_val))

  /**
    ******************************************************************************
    * monitor missUnit response port
    ******************************************************************************
    */
  val s2_MSHR_match = VecInit((0 until PortNumber).map(i =>
    (s2_req_vSetIdx(i) === fromMSHR.bits.vSetIdx) &&
      (s2_req_ptags(i) === getPhyTagFromBlk(fromMSHR.bits.blkPaddr)) &&
      fromMSHR.valid // we don't care about whether it's corrupt here
  ))
  val s2_MSHR_hits  = Seq(s2_valid && s2_MSHR_match(0), s2_valid && s2_MSHR_match(1) && s2_doubleline)
  val s2_MSHR_datas = fromMSHR.bits.data.asTypeOf(Vec(ICacheDataBanks, UInt((blockBits / ICacheDataBanks).W)))

  val s2_bankIdxLow = s2_req_offset >> log2Ceil(blockBytes / ICacheDataBanks)
  val s2_bankMSHRHit = VecInit((0 until ICacheDataBanks).map(i =>
    ((i.U >= s2_bankIdxLow) && s2_MSHR_hits(0)) || ((i.U < s2_bankIdxLow) && s2_MSHR_hits(1))
  ))

  (0 until ICacheDataBanks).foreach { i =>
    when(s1_fire) {
      s2_datas             := s1_datas
      s2_data_is_from_MSHR := s1_data_is_from_MSHR
    }.elsewhen(s2_bankMSHRHit(i)) {
      s2_datas(i) := s2_MSHR_datas(i)
      // also update s2_data_is_from_MSHR when re-fetched, to clear s2_data_corrupt flag and let s2_fire
      s2_data_is_from_MSHR(i) := true.B
    }
  }
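  // note: the s1_fire branch takes priority, so a refill arriving in the same cycle a new request
  // enters s2 is not applied here; it has already been captured by the s1 MSHR match/bypass logic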

  (0 until PortNumber).foreach { i =>
    when(s1_fire) {
      s2_hits := s1_hits
    }.elsewhen(s2_MSHR_hits(i)) {
      // update s2_hits even if it's corrupt, to let s2_fire
      s2_hits(i) := true.B
      // also clear s2_meta_corrupt flag when re-fetched, to let s2_fire
      s2_meta_corrupt(i) := false.B
    }
  }

  val s2_l2_corrupt = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
  (0 until PortNumber).foreach { i =>
    when(s1_fire) {
      s2_l2_corrupt(i) := false.B
    }.elsewhen(s2_MSHR_hits(i)) {
      s2_l2_corrupt(i) := fromMSHR.bits.corrupt
    }
  }

  /**
    ******************************************************************************
    * send request to MSHR if ICache miss / ECC corrupt
    ******************************************************************************
    */

  // merge pmp mmio and itlb pbmt
  val s2_mmio = VecInit((s2_pmp_mmio zip s2_itlb_pbmt).map { case (mmio, pbmt) =>
    mmio || Pbmt.isUncache(pbmt)
  })

  // try to re-fetch data from the L2 cache if an ECC error is detected, unless the data came from the MSHR
  val s2_corrupt_refetch = (s2_meta_corrupt zip s2_data_corrupt).map {
    case (meta, data) => meta || data
  }

  /* s2_exception includes itlb pf/gpf/af, pmp af and meta corruption (af), none of which should be fetched
   * mmio should not be fetched either; it is handled by the IFU mmio fsm instead
   * also, if a previous port has an exception, the latter port should not be fetched
   */
  val s2_should_fetch = VecInit((0 until PortNumber).map { i =>
    (!s2_hits(i) || s2_corrupt_refetch(i)) &&
    (if (i == 0) true.B else s2_doubleline) &&
    !ExceptionType.hasException(s2_exception.take(i + 1)) &&
    s2_mmio.take(i + 1).map(!_).reduce(_ && _)
  })

  val toMSHRArbiter = Module(new Arbiter(new ICacheMissReq, PortNumber))

  // To avoid sending duplicate requests.
  val s2_has_send = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
  (0 until PortNumber).foreach { i =>
    when(s1_fire) {
      s2_has_send(i) := false.B
    }.elsewhen(toMSHRArbiter.io.in(i).fire) {
      s2_has_send(i) := true.B
    }
  }
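  // s2_has_send(i) is cleared when a new request enters s2 (s1_fire) and set once the arbiter
  // accepts port i's miss request, so each port sends at most one miss request per s2 request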

  (0 until PortNumber).map { i =>
    toMSHRArbiter.io.in(i).valid         := s2_valid && s2_should_fetch(i) && !s2_has_send(i) && !s2_flush
    toMSHRArbiter.io.in(i).bits.blkPaddr := getBlkAddr(s2_req_paddr(i))
    toMSHRArbiter.io.in(i).bits.vSetIdx  := s2_req_vSetIdx(i)
  }
  toMSHR <> toMSHRArbiter.io.out

  XSPerfAccumulate("to_missUnit_stall", toMSHR.valid && !toMSHR.ready)

  val s2_fetch_finish = !s2_should_fetch.reduce(_ || _)

  // also raise af if l2 corrupt is detected
  val s2_l2_exception = VecInit(s2_l2_corrupt.map(ExceptionType.fromECC(true.B, _)))
  // NOTE: do NOT raise af if meta/data corrupt is detected, they are automatically recovered by re-fetching from L2

  // merge s2 exceptions, itlb has the highest priority, then l2
  val s2_exception_out = ExceptionType.merge(
    s2_exception, // includes itlb/pmp exception
    s2_l2_exception
  )

  /**
    ******************************************************************************
    * response to IFU
    ******************************************************************************
    */
  (0 until PortNumber).foreach { i =>
    if (i == 0) {
      toIFU(i).valid          := s2_fire
      toIFU(i).bits.exception := s2_exception_out(i)
      toIFU(i).bits.pmp_mmio  := s2_pmp_mmio(i) // pass pmp_mmio instead of merged mmio to IFU
      toIFU(i).bits.itlb_pbmt := s2_itlb_pbmt(i)
      toIFU(i).bits.data      := s2_datas.asTypeOf(UInt(blockBits.W))
    } else {
      toIFU(i).valid          := s2_fire && s2_doubleline
      toIFU(i).bits.exception := Mux(s2_doubleline, s2_exception_out(i), ExceptionType.none)
      toIFU(i).bits.pmp_mmio  := s2_pmp_mmio(i) && s2_doubleline
      toIFU(i).bits.itlb_pbmt := Mux(s2_doubleline, s2_itlb_pbmt(i), Pbmt.pma)
      toIFU(i).bits.data      := DontCare
    }
    toIFU(i).bits.backendException := s2_backendException
    toIFU(i).bits.vaddr            := s2_req_vaddr(i)
    toIFU(i).bits.paddr            := s2_req_paddr(i)
    toIFU(i).bits.gpaddr           := s2_req_gpaddr // Note: toIFU(1).bits.gpaddr is actually DontCare in current design
    toIFU(i).bits.isForVSnonLeafPTE := s2_req_isForVSnonLeafPTE
  }

  s2_flush := io.flush
  s2_ready := (s2_fetch_finish && !io.respStall) || !s2_valid
  s2_fire  := s2_valid && s2_fetch_finish && !io.respStall && !s2_flush

  /**
    ******************************************************************************
    * report Tilelink corrupt error
    ******************************************************************************
    */
  (0 until PortNumber).map { i =>
    when(RegNext(s2_fire && s2_l2_corrupt(i))) {
      io.errors(i).valid              := true.B
      io.errors(i).bits.report_to_beu := false.B // L2 should have reported it to the bus error unit already, no need to do it again
      io.errors(i).bits.paddr         := RegNext(s2_req_paddr(i))
      io.errors(i).bits.source.tag    := false.B
      io.errors(i).bits.source.data   := false.B
      io.errors(i).bits.source.l2     := true.B
    }
  }

  /**
    ******************************************************************************
    * performance info. TODO: need to simplify the logic
    ******************************************************************************
    */
  io.perfInfo.only_0_hit      := s2_hits(0) && !s2_doubleline
  io.perfInfo.only_0_miss     := !s2_hits(0) && !s2_doubleline
  io.perfInfo.hit_0_hit_1     := s2_hits(0) && s2_hits(1) && s2_doubleline
  io.perfInfo.hit_0_miss_1    := s2_hits(0) && !s2_hits(1) && s2_doubleline
  io.perfInfo.miss_0_hit_1    := !s2_hits(0) && s2_hits(1) && s2_doubleline
  io.perfInfo.miss_0_miss_1   := !s2_hits(0) && !s2_hits(1) && s2_doubleline
  io.perfInfo.hit_0_except_1  := s2_hits(0) && (ExceptionType.hasException(s2_exception(1))) && s2_doubleline
  io.perfInfo.miss_0_except_1 := !s2_hits(0) && (ExceptionType.hasException(s2_exception(1))) && s2_doubleline
  io.perfInfo.bank_hit(0)     := s2_hits(0)
  io.perfInfo.bank_hit(1)     := s2_hits(1) && s2_doubleline
  io.perfInfo.except_0        := ExceptionType.hasException(s2_exception(0))
  io.perfInfo.hit             := s2_hits(0) && (!s2_doubleline || s2_hits(1))

  /** <PERF> fetch bubble generated by icache miss */
  XSPerfAccumulate("icache_bubble_s2_miss", s2_valid && !s2_fetch_finish)
  XSPerfAccumulate("icache_bubble_s0_wayLookup", s0_valid && !fromWayLookup.ready)

  io.fetch.topdownIcacheMiss := !s2_fetch_finish
  io.fetch.topdownItlbMiss   := s0_valid && !fromWayLookup.ready

  // class ICacheTouchDB(implicit p: Parameters) extends ICacheBundle{
  //   val blkPaddr  = UInt((PAddrBits - blockOffBits).W)
  //   val vSetIdx   = UInt(idxBits.W)
  //   val waymask   = UInt(log2Ceil(nWays).W)
  // }

  // val isWriteICacheTouchTable = WireInit(Constantin.createRecord("isWriteICacheTouchTable" + p(XSCoreParamsKey).HartId.toString))
  // val ICacheTouchTable = ChiselDB.createTable("ICacheTouchTable" + p(XSCoreParamsKey).HartId.toString, new ICacheTouchDB)

  // val ICacheTouchDumpData = Wire(Vec(PortNumber, new ICacheTouchDB))
  // (0 until PortNumber).foreach{ i =>
  //   ICacheTouchDumpData(i).blkPaddr  := getBlkAddr(s2_req_paddr(i))
  //   ICacheTouchDumpData(i).vSetIdx   := s2_req_vSetIdx(i)
  //   ICacheTouchDumpData(i).waymask   := OHToUInt(s2_tag_match_vec(i))
  //   ICacheTouchTable.log(
  //     data  = ICacheTouchDumpData(i),
  //     en    = io.touch(i).valid,
  //     site  = "req_" + i.toString,
  //     clock = clock,
  //     reset = reset
  //   )
  // }

  /**
    ******************************************************************************
    * difftest refill check
    ******************************************************************************
    */
  if (env.EnableDifftest) {
    val discards = (0 until PortNumber).map { i =>
      val discard = ExceptionType.hasException(toIFU(i).bits.exception) || toIFU(i).bits.pmp_mmio ||
        Pbmt.isUncache(toIFU(i).bits.itlb_pbmt)
      discard
    }
    val blkPaddrAll = s2_req_paddr.map(addr => addr(PAddrBits - 1, blockOffBits) << blockOffBits)
    (0 until ICacheDataBanks).map { i =>
      val diffMainPipeOut = DifftestModule(new DiffRefillEvent, dontCare = true)
      diffMainPipeOut.coreid := io.hartId
      diffMainPipeOut.index  := (3 + i).U

      val bankSel = getBankSel(s2_req_offset, s2_valid).reduce(_ | _)
      val lineSel = getLineSel(s2_req_offset)

      diffMainPipeOut.valid := s2_fire && bankSel(i).asBool && Mux(lineSel(i), !discards(1), !discards(0))
      diffMainPipeOut.addr := Mux(
        lineSel(i),
        blkPaddrAll(1) + (i.U << (log2Ceil(blockBytes / ICacheDataBanks))),
        blkPaddrAll(0) + (i.U << (log2Ceil(blockBytes / ICacheDataBanks)))
      )

      diffMainPipeOut.data  := s2_datas(i).asTypeOf(diffMainPipeOut.data)
      diffMainPipeOut.idtfr := DontCare
    }
  }
}