// xref: /XiangShan/src/main/scala/xiangshan/frontend/IFU.scala (revision aeedc8ee24c606b62f87b4a2382c7af1cca1fcd7)
/***************************************************************************************
* Copyright (c) 2024 Beijing Institute of Open Source Chip (BOSC)
* Copyright (c) 2020-2024 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.RVCDecoder
import xiangshan._
import xiangshan.cache.mmu._
import xiangshan.frontend.icache._
import utils._
import utility._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}
import utility.ChiselDB

trait HasInstrMMIOConst extends HasXSParameter with HasIFUConst {
  def mmioBusWidth = 64
  def mmioBusBytes = mmioBusWidth / 8
  def maxInstrLen = 32
}

trait HasIFUConst extends HasXSParameter {
  def addrAlign(addr: UInt, bytes: Int, highest: Int): UInt = Cat(addr(highest - 1, log2Ceil(bytes)), 0.U(log2Ceil(bytes).W))
  def fetchQueueSize = 2

  def getBasicBlockIdx(pc: UInt, start: UInt): UInt = {
    val byteOffset = pc - start
    (byteOffset - instBytes.U)(log2Ceil(PredictWidth), instOffsetBits)
  }
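  // A worked example (sketch, assuming RVC is enabled so instBytes = 2,
  // instOffsetBits = 1, and PredictWidth = 16): with start = 0x1000 and
  // pc = 0x1008, byteOffset = 8 and (8 - 2) >> 1 = 3, i.e. the slot index of
  // the last 2-byte slot strictly before pc. Callers therefore pass
  // nextStartAddr as pc to obtain the index of the last instruction slot
  // inside the prediction block.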
}

class IfuToFtqIO(implicit p: Parameters) extends XSBundle {
  val pdWb = Valid(new PredecodeWritebackBundle)
}

class IfuToBackendIO(implicit p: Parameters) extends XSBundle {
  // write to backend gpaddr mem
  val gpaddrMem_wen = Output(Bool())
  val gpaddrMem_waddr = Output(UInt(log2Ceil(FtqSize).W)) // Ftq Ptr
  // 2 gpaddrs, correspond to startAddr & nextLineAddr in bundle FtqICacheInfo
  // TODO: avoid cross page entry in Ftq
  val gpaddrMem_wdata = Output(UInt(GPAddrBits.W))
}

class FtqInterface(implicit p: Parameters) extends XSBundle {
  val fromFtq = Flipped(new FtqToIfuIO)
  val toFtq   = new IfuToFtqIO
}

class UncacheInterface(implicit p: Parameters) extends XSBundle {
  val fromUncache = Flipped(DecoupledIO(new InsUncacheResp))
  val toUncache   = DecoupledIO(new InsUncacheReq)
}

class NewIFUIO(implicit p: Parameters) extends XSBundle {
  val ftqInter         = new FtqInterface
  val icacheInter      = Flipped(new IFUICacheIO)
  val icacheStop       = Output(Bool())
  val icachePerfInfo   = Input(new ICachePerfInfo)
  val toIbuffer        = Decoupled(new FetchToIBuffer)
  val toBackend        = new IfuToBackendIO
  val uncacheInter     = new UncacheInterface
  val frontendTrigger  = Flipped(new FrontendTdataDistributeIO)
  val rob_commits      = Flipped(Vec(CommitWidth, Valid(new RobCommitInfo)))
  val iTLBInter        = new TlbRequestIO
  val pmp              = new ICachePMPBundle
  val mmioCommitRead   = new mmioCommitRead
  val illBuf           = Output(UInt(32.W))
}

// record the situation in which fallThruAddr falls into
// the middle of an RVI inst
class LastHalfInfo(implicit p: Parameters) extends XSBundle {
  val valid = Bool()
  val middlePC = UInt(VAddrBits.W)
  def matchThisBlock(startAddr: UInt) = valid && middlePC === startAddr
}

class IfuToPreDecode(implicit p: Parameters) extends XSBundle {
  val data                = if (HasCExtension) Vec(PredictWidth + 1, UInt(16.W)) else Vec(PredictWidth, UInt(32.W))
  val frontendTrigger     = new FrontendTdataDistributeIO
  val pc                  = Vec(PredictWidth, UInt(VAddrBits.W))
}


class IfuToPredChecker(implicit p: Parameters) extends XSBundle {
  val ftqOffset     = Valid(UInt(log2Ceil(PredictWidth).W))
  val jumpOffset    = Vec(PredictWidth, UInt(XLEN.W))
  val target        = UInt(VAddrBits.W)
  val instrRange    = Vec(PredictWidth, Bool())
  val instrValid    = Vec(PredictWidth, Bool())
  val pds           = Vec(PredictWidth, new PreDecodeInfo)
  val pc            = Vec(PredictWidth, UInt(VAddrBits.W))
  val fire_in       = Bool()
}

class FetchToIBufferDB extends Bundle {
  val start_addr = UInt(39.W)
  val instr_count = UInt(32.W)
  val exception = Bool()
  val is_cache_hit = Bool()
}

class IfuWbToFtqDB extends Bundle {
  val start_addr = UInt(39.W)
  val is_miss_pred = Bool()
  val miss_pred_offset = UInt(32.W)
  val checkJalFault = Bool()
  val checkRetFault = Bool()
  val checkTargetFault = Bool()
  val checkNotCFIFault = Bool()
  val checkInvalidTaken = Bool()
}

class NewIFU(implicit p: Parameters) extends XSModule
  with HasICacheParameters
  with HasXSParameter
  with HasIFUConst
  with HasPdConst
  with HasCircularQueuePtrHelper
  with HasPerfEvents
  with HasTlbConst
{
  val io = IO(new NewIFUIO)
  val (toFtq, fromFtq)         = (io.ftqInter.toFtq, io.ftqInter.fromFtq)
  val fromICache               = io.icacheInter.resp
  val (toUncache, fromUncache) = (io.uncacheInter.toUncache, io.uncacheInter.fromUncache)

  def isCrossLineReq(start: UInt, end: UInt): Bool = start(blockOffBits) ^ end(blockOffBits)

  def numOfStage = 3
  // equals the overflow-bit position of lower_result
  def PcCutPoint = (VAddrBits / 4) - 1
  def CatPC(low: UInt, high: UInt, high1: UInt): UInt = {
    Mux(
      low(PcCutPoint),
      Cat(high1, low(PcCutPoint - 1, 0)),
      Cat(high, low(PcCutPoint - 1, 0))
    )
  }
  def CatPC(lowVec: Vec[UInt], high: UInt, high1: UInt): Vec[UInt] = VecInit(lowVec.map(CatPC(_, high, high1)))
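  // Worked example of the PC-cut trick (sketch, assuming VAddrBits = 39, so
  // PcCutPoint = 8): stage 1 only adds the low 8 bits plus a 9th overflow bit
  // per slot, while both candidate high parts (high and high + 1) are computed
  // once per fetch block; CatPC later selects between them with the overflow
  // bit, avoiding a full-width adder for every one of the PredictWidth slots.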
  require(numOfStage > 1, "IFU numOfStage must be greater than 1")
  val topdown_stages = RegInit(VecInit(Seq.fill(numOfStage)(0.U.asTypeOf(new FrontendTopDownBundle))))
  // bubble events in IFU, only happen in stage 1
  val icacheMissBubble = Wire(Bool())
  val itlbMissBubble   = Wire(Bool())

  // only driven by clock, not valid-ready
  topdown_stages(0) := fromFtq.req.bits.topdown_info
  for (i <- 1 until numOfStage) {
    topdown_stages(i) := topdown_stages(i - 1)
  }
  when (icacheMissBubble) {
    topdown_stages(1).reasons(TopDownCounters.ICacheMissBubble.id) := true.B
  }
  when (itlbMissBubble) {
    topdown_stages(1).reasons(TopDownCounters.ITLBMissBubble.id) := true.B
  }
  io.toIbuffer.bits.topdown_info := topdown_stages(numOfStage - 1)
  when (fromFtq.topdown_redirect.valid) {
    // only redirect from backend, IFU redirect itself is handled elsewhere
    when (fromFtq.topdown_redirect.bits.debugIsCtrl) {
      /*
      for (i <- 0 until numOfStage) {
        topdown_stages(i).reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
      }
      io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
      */
      when (fromFtq.topdown_redirect.bits.ControlBTBMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.BTBMissBubble.id) := true.B
      } .elsewhen (fromFtq.topdown_redirect.bits.TAGEMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.TAGEMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.TAGEMissBubble.id) := true.B
      } .elsewhen (fromFtq.topdown_redirect.bits.SCMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.SCMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.SCMissBubble.id) := true.B
      } .elsewhen (fromFtq.topdown_redirect.bits.ITTAGEMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
      } .elsewhen (fromFtq.topdown_redirect.bits.RASMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.RASMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.RASMissBubble.id) := true.B
      }
    } .elsewhen (fromFtq.topdown_redirect.bits.debugIsMemVio) {
      for (i <- 0 until numOfStage) {
        topdown_stages(i).reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
      }
      io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
    } .otherwise {
      for (i <- 0 until numOfStage) {
        topdown_stages(i).reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
      }
      io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
    }
  }

  class TlbExcept(implicit p: Parameters) extends XSBundle {
    val pageFault = Bool()
    val accessFault = Bool()
    val mmio = Bool()
  }

  val preDecoder = Module(new PreDecode)

  val predChecker     = Module(new PredChecker)
  val frontendTrigger = Module(new FrontendTrigger)
  val (checkerIn, checkerOutStage1, checkerOutStage2) =
    (predChecker.io.in, predChecker.io.out.stage1Out, predChecker.io.out.stage2Out)

  /**
    ******************************************************************************
    * IFU Stage 0
    * - send cacheline fetch request to ICacheMainPipe
    ******************************************************************************
    */

  val f0_valid      = fromFtq.req.valid
  val f0_ftq_req    = fromFtq.req.bits
  val f0_doubleLine = fromFtq.req.bits.crossCacheline
  val f0_vSetIdx    = VecInit(get_idx(f0_ftq_req.startAddr), get_idx(f0_ftq_req.nextlineStart))
  val f0_fire       = fromFtq.req.fire

  val f0_flush, f1_flush, f2_flush, f3_flush = WireInit(false.B)
  val from_bpu_f0_flush, from_bpu_f1_flush, from_bpu_f2_flush, from_bpu_f3_flush = WireInit(false.B)

  from_bpu_f0_flush := fromFtq.flushFromBpu.shouldFlushByStage2(f0_ftq_req.ftqIdx) ||
                       fromFtq.flushFromBpu.shouldFlushByStage3(f0_ftq_req.ftqIdx)

  val wb_redirect, mmio_redirect, backend_redirect = WireInit(false.B)
  val f3_wb_not_flush = WireInit(false.B)

  backend_redirect := fromFtq.redirect.valid
  f3_flush := backend_redirect || (wb_redirect && !f3_wb_not_flush)
  f2_flush := backend_redirect || mmio_redirect || wb_redirect
  f1_flush := f2_flush || from_bpu_f1_flush
  f0_flush := f1_flush || from_bpu_f0_flush

  val f1_ready, f2_ready, f3_ready = WireInit(false.B)

  fromFtq.req.ready := f1_ready && io.icacheInter.icacheReady


  when (wb_redirect) {
    when (f3_wb_not_flush) {
      topdown_stages(2).reasons(TopDownCounters.BTBMissBubble.id) := true.B
    }
    for (i <- 0 until numOfStage - 1) {
      topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id) := true.B
    }
  }

  /** <PERF> f0 fetch bubble */

  XSPerfAccumulate("fetch_bubble_ftq_not_valid", !fromFtq.req.valid && fromFtq.req.ready)
  // XSPerfAccumulate("fetch_bubble_pipe_stall",    f0_valid && toICache(0).ready && toICache(1).ready && !f1_ready )
  // XSPerfAccumulate("fetch_bubble_icache_0_busy",   f0_valid && !toICache(0).ready  )
  // XSPerfAccumulate("fetch_bubble_icache_1_busy",   f0_valid && !toICache(1).ready  )
  XSPerfAccumulate("fetch_flush_backend_redirect", backend_redirect)
  XSPerfAccumulate("fetch_flush_wb_redirect",      wb_redirect)
  XSPerfAccumulate("fetch_flush_bpu_f1_flush",     from_bpu_f1_flush)
  XSPerfAccumulate("fetch_flush_bpu_f0_flush",     from_bpu_f0_flush)


  /**
    ******************************************************************************
    * IFU Stage 1
    * - calculate pc/half_pc/cut_ptr for every instruction
    ******************************************************************************
    */

  val f1_valid      = RegInit(false.B)
  val f1_ftq_req    = RegEnable(f0_ftq_req,    f0_fire)
  // val f1_situation  = RegEnable(f0_situation,  f0_fire)
  val f1_doubleLine = RegEnable(f0_doubleLine, f0_fire)
  val f1_vSetIdx    = RegEnable(f0_vSetIdx,    f0_fire)
  val f1_fire       = f1_valid && f2_ready

  f1_ready := f1_fire || !f1_valid

  from_bpu_f1_flush := fromFtq.flushFromBpu.shouldFlushByStage3(f1_ftq_req.ftqIdx) && f1_valid
  // from_bpu_f1_flush := false.B

  when(f1_flush)                  { f1_valid := false.B }
  .elsewhen(f0_fire && !f0_flush) { f1_valid := true.B  }
  .elsewhen(f1_fire)              { f1_valid := false.B }

  val f1_pc_high       = f1_ftq_req.startAddr(VAddrBits - 1, PcCutPoint)
  val f1_pc_high_plus1 = f1_pc_high + 1.U

  /**
   * To reduce power consumption, the full PC value is not computed in stage 1.
   * The original (now deprecated) logic was:
   *   val f1_pc = VecInit(f1_pc_lower_result.map{ i =>
   *     Mux(i(f1_pc_adder_cut_point), Cat(f1_pc_high_plus1, i(f1_pc_adder_cut_point-1, 0)), Cat(f1_pc_high, i(f1_pc_adder_cut_point-1, 0)))})
   */
  val f1_pc_lower_result = VecInit((0 until PredictWidth).map(i =>
    Cat(0.U(1.W), f1_ftq_req.startAddr(PcCutPoint - 1, 0)) + (i * 2).U
  )) // cat with overflow bit

  val f1_pc = CatPC(f1_pc_lower_result, f1_pc_high, f1_pc_high_plus1)

  val f1_half_snpc_lower_result = VecInit((0 until PredictWidth).map(i =>
    Cat(0.U(1.W), f1_ftq_req.startAddr(PcCutPoint - 1, 0)) + ((i + 2) * 2).U
  )) // cat with overflow bit
  val f1_half_snpc = CatPC(f1_half_snpc_lower_result, f1_pc_high, f1_pc_high_plus1)

  if (env.FPGAPlatform) {
    val f1_pc_diff        = VecInit((0 until PredictWidth).map(i => f1_ftq_req.startAddr + (i * 2).U))
    val f1_half_snpc_diff = VecInit((0 until PredictWidth).map(i => f1_ftq_req.startAddr + ((i + 2) * 2).U))

    XSError(f1_pc.zip(f1_pc_diff).map{ case (a, b) => a.asUInt =/= b.asUInt }.reduce(_||_), "f1_pc adder cut fail")
    XSError(f1_half_snpc.zip(f1_half_snpc_diff).map{ case (a, b) => a.asUInt =/= b.asUInt }.reduce(_||_), "f1_half_snpc adder cut fail")
  }

  val f1_cut_ptr = if (HasCExtension) VecInit((0 until PredictWidth + 1).map(i => Cat(0.U(2.W), f1_ftq_req.startAddr(blockOffBits - 1, 1)) + i.U))
                   else               VecInit((0 until PredictWidth).map(i =>     Cat(0.U(2.W), f1_ftq_req.startAddr(blockOffBits - 1, 2)) + i.U))

  /**
    ******************************************************************************
    * IFU Stage 2
    * - icache response data (latched for pipeline stop)
    * - generate exception bits for every instruction (page fault/access fault/mmio)
    * - generate predicted instruction range (1 means this instruction is in this fetch packet)
    * - cut data from cachelines into packet instruction code
    * - instruction predecode and RVC expand
    ******************************************************************************
    */

  val icacheRespAllValid = WireInit(false.B)

  val f2_valid      = RegInit(false.B)
  val f2_ftq_req    = RegEnable(f1_ftq_req,    f1_fire)
  // val f2_situation  = RegEnable(f1_situation,  f1_fire)
  val f2_doubleLine = RegEnable(f1_doubleLine, f1_fire)
  val f2_vSetIdx    = RegEnable(f1_vSetIdx,    f1_fire)
  val f2_fire       = f2_valid && f3_ready && icacheRespAllValid

  f2_ready := f2_fire || !f2_valid
  // TODO: addr compare may be timing critical
  val f2_icache_all_resp_wire = fromICache(0).valid && (fromICache(0).bits.vaddr === f2_ftq_req.startAddr) &&
    ((fromICache(1).valid && (fromICache(1).bits.vaddr === f2_ftq_req.nextlineStart)) || !f2_doubleLine)
  val f2_icache_all_resp_reg  = RegInit(false.B)

  icacheRespAllValid := f2_icache_all_resp_reg || f2_icache_all_resp_wire

  icacheMissBubble := io.icacheInter.topdownIcacheMiss
  itlbMissBubble   := io.icacheInter.topdownItlbMiss

  io.icacheStop := !f3_ready

  when(f2_flush)                                              { f2_icache_all_resp_reg := false.B }
  .elsewhen(f2_valid && f2_icache_all_resp_wire && !f3_ready) { f2_icache_all_resp_reg := true.B  }
  .elsewhen(f2_fire && f2_icache_all_resp_reg)                { f2_icache_all_resp_reg := false.B }

  when(f2_flush)                  { f2_valid := false.B }
  .elsewhen(f1_fire && !f1_flush) { f2_valid := true.B  }
  .elsewhen(f2_fire)              { f2_valid := false.B }

  val f2_exception = VecInit((0 until PortNumber).map(i => fromICache(i).bits.exception))
  // paddr and gpaddr of [startAddr, nextLineAddr]
  val f2_paddrs    = VecInit((0 until PortNumber).map(i => fromICache(i).bits.paddr))
  val f2_gpaddr    = fromICache(0).bits.gpaddr

  // FIXME: what if port 0 is not mmio, but port 1 is?
  // cancel mmio fetch if exception occurs
  val f2_mmio = f2_exception(0) === ExceptionType.none && (
    fromICache(0).bits.pmp_mmio ||
      // currently, we do not distinguish between Pbmt.nc and Pbmt.io
      // anyway, they are both non-cacheable, and should be handled with mmio fsm and sent to Uncache module
      Pbmt.isUncache(fromICache(0).bits.itlb_pbmt)
  )


  /**
    * To reduce the number of registers, the original code
    *   f2_pc = RegEnable(f1_pc, f1_fire)
    * is replaced by registering the split PC parts and recombining them with CatPC.
    */
  val f2_pc_lower_result = RegEnable(f1_pc_lower_result, f1_fire)
  val f2_pc_high         = RegEnable(f1_pc_high,         f1_fire)
  val f2_pc_high_plus1   = RegEnable(f1_pc_high_plus1,   f1_fire)
  val f2_pc              = CatPC(f2_pc_lower_result, f2_pc_high, f2_pc_high_plus1)

  val f2_cut_ptr      = RegEnable(f1_cut_ptr, f1_fire)
  val f2_resend_vaddr = RegEnable(f1_ftq_req.startAddr + 2.U, f1_fire)

  def isNextLine(pc: UInt, startAddr: UInt) = {
    startAddr(blockOffBits) ^ pc(blockOffBits)
  }

  def isLastInLine(pc: UInt) = {
    pc(blockOffBits - 1, 0) === "b111110".U
  }
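  // "b111110" is byte offset 62, the last 2-byte slot of a 64-byte cacheline
  // (sketch, assuming blockBytes = 64, i.e. blockOffBits = 6); an RVI
  // instruction starting here necessarily crosses the cacheline boundary.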

  val f2_foldpc = VecInit(f2_pc.map(i => XORFold(i(VAddrBits - 1, 1), MemPredPCWidth)))
  val f2_jump_range = Fill(PredictWidth, !f2_ftq_req.ftqOffset.valid) | Fill(PredictWidth, 1.U(1.W)) >> ~f2_ftq_req.ftqOffset.bits
  val f2_ftr_range  = Fill(PredictWidth,  f2_ftq_req.ftqOffset.valid) | Fill(PredictWidth, 1.U(1.W)) >> ~getBasicBlockIdx(f2_ftq_req.nextStartAddr, f2_ftq_req.startAddr)
  val f2_instr_range = f2_jump_range & f2_ftr_range
  val f2_exception_vec = VecInit((0 until PredictWidth).map(i => MuxCase(ExceptionType.none, Seq(
      !isNextLine(f2_pc(i), f2_ftq_req.startAddr)                   -> f2_exception(0),
      (isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_doubleLine) -> f2_exception(1)
  ))))
  val f2_perf_info = io.icachePerfInfo

  def cut(cacheline: UInt, cutPtr: Vec[UInt]): Vec[UInt] = {
    require(HasCExtension)
    // if (HasCExtension) {
      val result  = Wire(Vec(PredictWidth + 1, UInt(16.W)))
      val dataVec = cacheline.asTypeOf(Vec(blockBytes, UInt(16.W))) // blockBytes 16-bit chunks covering two cachelines
      (0 until PredictWidth + 1).foreach( i =>
        result(i) := dataVec(cutPtr(i)) // the max ptr is 3 * blockBytes / 4 - 1
      )
      result
    // } else {
    //   val result   = Wire(Vec(PredictWidth, UInt(32.W)) )
    //   val dataVec  = cacheline.asTypeOf(Vec(blockBytes * 2/ 4, UInt(32.W)))
    //   (0 until PredictWidth).foreach( i =>
    //     result(i) := dataVec(cutPtr(i))
    //   )
    //   result
    // }
  }
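  // Cut sketch: with a start address at byte offset 0x3A of a 64-byte block
  // (assuming PredictWidth = 16), f1_cut_ptr selects the 17 16-bit chunks
  // 29..45, which runs past the first cacheline; this is why the `cacheline`
  // argument below is built with twice the response width.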

  val f2_cache_response_data = fromICache.map(_.bits.data)
  val f2_data_2_cacheline    = Cat(f2_cache_response_data(0), f2_cache_response_data(0))

  val f2_cut_data = cut(f2_data_2_cacheline, f2_cut_ptr)

  /** predecode (include RVC expander) */
  // preDecoderRegIn.data := f2_reg_cut_data
  // preDecoderRegInIn.frontendTrigger := io.frontendTrigger
  // preDecoderRegInIn.csrTriggerEnable := io.csrTriggerEnable
  // preDecoderRegIn.pc  := f2_pc

  val preDecoderIn = preDecoder.io.in
  preDecoderIn.valid                := f2_valid
  preDecoderIn.bits.data            := f2_cut_data
  preDecoderIn.bits.frontendTrigger := io.frontendTrigger
  preDecoderIn.bits.pc              := f2_pc
  val preDecoderOut = preDecoder.io.out

  // val f2_expd_instr  = preDecoderOut.expInstr
  val f2_instr        = preDecoderOut.instr
  val f2_pd           = preDecoderOut.pd
  val f2_jump_offset  = preDecoderOut.jumpOffset
  val f2_hasHalfValid = preDecoderOut.hasHalfValid
  /* if there is a cross-page RVI instruction and the former page has no exception,
   * whether it has an exception actually depends on the latter page
   */
  val f2_crossPage_exception_vec = VecInit((0 until PredictWidth).map { i => Mux(
    isLastInLine(f2_pc(i)) && !f2_pd(i).isRVC && f2_doubleLine && f2_exception(0) === ExceptionType.none,
    f2_exception(1),
    ExceptionType.none
  )})
  XSPerfAccumulate("fetch_bubble_icache_not_resp", f2_valid && !icacheRespAllValid)


  /**
    ******************************************************************************
    * IFU Stage 3
    * - handle MMIO instruction
    *   - send request to Uncache fetch Unit
    *   - every packet includes 1 MMIO instruction
    *   - MMIO instructions stop the fetch pipeline until committing from the RoB
    *   - flush to snpc (send ifu_redirect to Ftq)
    * - Ibuffer enqueue
    * - check predict result in Frontend (jalFault/retFault/notCFIFault/invalidTakenFault/targetFault)
    * - handle last half RVI instruction
    ******************************************************************************
    */

  val f3_valid      = RegInit(false.B)
  val f3_ftq_req    = RegEnable(f2_ftq_req,    f2_fire)
  // val f3_situation  = RegEnable(f2_situation,  f2_fire)
  val f3_doubleLine = RegEnable(f2_doubleLine, f2_fire)
  val f3_fire       = io.toIbuffer.fire

  val f3_cut_data = RegEnable(f2_cut_data, f2_fire)

  val f3_exception = RegEnable(f2_exception, f2_fire)
  val f3_mmio      = RegEnable(f2_mmio,      f2_fire)

  // val f3_expd_instr = RegEnable(f2_expd_instr, f2_fire)
  val f3_instr = RegEnable(f2_instr, f2_fire)
  val f3_expd  = (0 until PredictWidth).map{ i =>
    val expander = Module(new RVCExpander)
    expander.io.in := f3_instr(i)
    (expander.io.out.bits, expander.io.ill)
  }
  val f3_expd_instr = VecInit(f3_expd.map(_._1))
  val f3_ill_raw    = VecInit(f3_expd.map(_._2))


  val f3_pd_wire                 = RegEnable(f2_pd,            f2_fire)
  val f3_pd                      = WireInit(f3_pd_wire)
  val f3_jump_offset             = RegEnable(f2_jump_offset,   f2_fire)
  val f3_exception_vec           = RegEnable(f2_exception_vec, f2_fire)
  val f3_crossPage_exception_vec = RegEnable(f2_crossPage_exception_vec, f2_fire)

  val f3_pc_lower_result = RegEnable(f2_pc_lower_result, f2_fire)
  val f3_pc_high         = RegEnable(f2_pc_high,         f2_fire)
  val f3_pc_high_plus1   = RegEnable(f2_pc_high_plus1,   f2_fire)
  val f3_pc              = CatPC(f3_pc_lower_result, f3_pc_high, f3_pc_high_plus1)

  val f3_pc_last_lower_result_plus2 = RegEnable(f2_pc_lower_result(PredictWidth - 1) + 2.U, f2_fire)
  val f3_pc_last_lower_result_plus4 = RegEnable(f2_pc_lower_result(PredictWidth - 1) + 4.U, f2_fire)
  // val f3_half_snpc   = RegEnable(f2_half_snpc,   f2_fire)

  /**
    ***********************************************************************
    * half_snpc(i) equals pc(i) + 4, so pc itself can be reused to compute
    * half_snpc instead of registering a separate vector.
    ***********************************************************************
    */
  val f3_half_snpc = Wire(Vec(PredictWidth, UInt(VAddrBits.W)))
  for (i <- 0 until PredictWidth) {
    if (i == (PredictWidth - 2)) {
      f3_half_snpc(i) := CatPC(f3_pc_last_lower_result_plus2, f3_pc_high, f3_pc_high_plus1)
    } else if (i == (PredictWidth - 1)) {
      f3_half_snpc(i) := CatPC(f3_pc_last_lower_result_plus4, f3_pc_high, f3_pc_high_plus1)
    } else {
      f3_half_snpc(i) := f3_pc(i + 2)
    }
  }

  val f3_instr_range  = RegEnable(f2_instr_range,  f2_fire)
  val f3_foldpc       = RegEnable(f2_foldpc,       f2_fire)
  val f3_hasHalfValid = RegEnable(f2_hasHalfValid, f2_fire)
  val f3_paddrs       = RegEnable(f2_paddrs,       f2_fire)
  val f3_gpaddr       = RegEnable(f2_gpaddr,       f2_fire)
  val f3_resend_vaddr = RegEnable(f2_resend_vaddr, f2_fire)

  // Expand by 1 bit to prevent overflow in the assertion below
  val f3_ftq_req_startAddr     = Cat(0.U(1.W), f3_ftq_req.startAddr)
  val f3_ftq_req_nextStartAddr = Cat(0.U(1.W), f3_ftq_req.nextStartAddr)
  // brType, isCall and isRet generation is delayed to the f3 stage
  val f3Predecoder = Module(new F3Predecoder)

  f3Predecoder.io.in.instr := f3_instr

  f3_pd.zipWithIndex.foreach{ case (pd, i) =>
    pd.brType := f3Predecoder.io.out.pd(i).brType
    pd.isCall := f3Predecoder.io.out.pd(i).isCall
    pd.isRet  := f3Predecoder.io.out.pd(i).isRet
  }

  val f3PdDiff = f3_pd_wire.zip(f3_pd).map{ case (a, b) => a.asUInt =/= b.asUInt }.reduce(_||_)
  XSError(f3_valid && f3PdDiff, "f3 pd diff")

  when(f3_valid && !f3_ftq_req.ftqOffset.valid){
    assert(f3_ftq_req_startAddr + (2 * PredictWidth).U >= f3_ftq_req_nextStartAddr,
      s"More than ${2 * PredictWidth} Bytes fetch is not allowed!")
  }

  /*** MMIO State Machine ***/
  val f3_mmio_data          = Reg(Vec(2, UInt(16.W)))
  val mmio_is_RVC           = RegInit(false.B)
  val mmio_resend_addr      = RegInit(0.U(PAddrBits.W))
  val mmio_resend_exception = RegInit(0.U(ExceptionType.width.W))
  val mmio_resend_gpaddr    = RegInit(0.U(GPAddrBits.W))

  // true until the first instruction has been delivered (nothing older to wait for)
  val is_first_instr = RegInit(true.B)
  /*** Determine whether the MMIO instruction is executable based on the previous prediction block ***/
  io.mmioCommitRead.mmioFtqPtr := RegNext(f3_ftq_req.ftqIdx - 1.U)

  val m_idle :: m_waitLastCmt :: m_sendReq :: m_waitResp :: m_sendTLB :: m_tlbResp :: m_sendPMP :: m_resendReq :: m_waitResendResp :: m_waitCommit :: m_commited :: Nil = Enum(11)
  val mmio_state = RegInit(m_idle)
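  // State flow (sketch): m_idle -> m_waitLastCmt (wait until all older
  // instructions have committed) -> m_sendReq -> m_waitResp; if the RVI
  // instruction straddles the 8-byte uncache word, re-translate and re-check
  // (-> m_sendTLB -> m_tlbResp -> m_sendPMP -> m_resendReq -> m_waitResendResp);
  // then -> m_waitCommit -> m_commited and back to m_idle.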

  val f3_req_is_mmio = f3_mmio && f3_valid
  val mmio_commit = VecInit(io.rob_commits.map{ commit =>
    commit.valid && commit.bits.ftqIdx === f3_ftq_req.ftqIdx && commit.bits.ftqOffset === 0.U
  }).asUInt.orR
  val f3_mmio_req_commit = f3_req_is_mmio && mmio_state === m_commited

  val f3_mmio_to_commit      = f3_req_is_mmio && mmio_state === m_waitCommit
  val f3_mmio_to_commit_next = RegNext(f3_mmio_to_commit)
  val f3_mmio_can_go         = f3_mmio_to_commit && !f3_mmio_to_commit_next

  val fromFtqRedirectReg = Wire(fromFtq.redirect.cloneType)
  fromFtqRedirectReg.bits := RegEnable(fromFtq.redirect.bits, 0.U.asTypeOf(fromFtq.redirect.bits), fromFtq.redirect.valid)
  fromFtqRedirectReg.valid := RegNext(fromFtq.redirect.valid, init = false.B)
  val mmioF3Flush           = RegNext(f3_flush, init = false.B)
  val f3_ftq_flush_self     = fromFtqRedirectReg.valid && RedirectLevel.flushItself(fromFtqRedirectReg.bits.level)
  val f3_ftq_flush_by_older = fromFtqRedirectReg.valid && isBefore(fromFtqRedirectReg.bits.ftqIdx, f3_ftq_req.ftqIdx)

  val f3_need_not_flush = f3_req_is_mmio && fromFtqRedirectReg.valid && !f3_ftq_flush_self && !f3_ftq_flush_by_older

  /**
    **********************************************************************************
    * Instruction fetch is deferred when an MMIO instruction is encountered, so that
    * speculative fetches do not disturb the MMIO region. The exception is when the
    * MMIO instruction is the very first instruction, since there is nothing older
    * to wait for.
    **********************************************************************************
    */
  when(is_first_instr && f3_fire){
    is_first_instr := false.B
  }

  when(f3_flush && !f3_req_is_mmio)                              { f3_valid := false.B }
  .elsewhen(mmioF3Flush && f3_req_is_mmio && !f3_need_not_flush) { f3_valid := false.B }
  .elsewhen(f2_fire && !f2_flush)                                { f3_valid := true.B  }
  .elsewhen(io.toIbuffer.fire && !f3_req_is_mmio)                { f3_valid := false.B }
  .elsewhen(f3_req_is_mmio && f3_mmio_req_commit)                { f3_valid := false.B }

  val f3_mmio_use_seq_pc = RegInit(false.B)

  val (redirect_ftqIdx, redirect_ftqOffset) = (fromFtqRedirectReg.bits.ftqIdx, fromFtqRedirectReg.bits.ftqOffset)
  val redirect_mmio_req = fromFtqRedirectReg.valid && redirect_ftqIdx === f3_ftq_req.ftqIdx && redirect_ftqOffset === 0.U

  when(RegNext(f2_fire && !f2_flush) && f3_req_is_mmio) { f3_mmio_use_seq_pc := true.B  }
  .elsewhen(redirect_mmio_req)                          { f3_mmio_use_seq_pc := false.B }

  f3_ready := (io.toIbuffer.ready && (f3_mmio_req_commit || !f3_req_is_mmio)) || !f3_valid

  // mmio state machine
  switch(mmio_state){
    is(m_idle){
      when(f3_req_is_mmio){
        mmio_state := m_waitLastCmt
      }
    }

    is(m_waitLastCmt){
      when(is_first_instr){
        mmio_state := m_sendReq
      }.otherwise{
        mmio_state := Mux(io.mmioCommitRead.mmioLastCommit, m_sendReq, m_waitLastCmt)
      }
    }

    is(m_sendReq){
      mmio_state := Mux(toUncache.fire, m_waitResp, m_sendReq)
    }

    is(m_waitResp){
      when(fromUncache.fire){
        val isRVC      = fromUncache.bits.data(1, 0) =/= 3.U
        val needResend = !isRVC && f3_paddrs(0)(2, 1) === 3.U
        mmio_state      := Mux(needResend, m_sendTLB, m_waitCommit)
        mmio_is_RVC     := isRVC
        f3_mmio_data(0) := fromUncache.bits.data(15, 0)
        f3_mmio_data(1) := fromUncache.bits.data(31, 16)
      }
    }
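    // needResend above: a 32-bit instruction whose low halfword occupies the
    // last 2 bytes of the 8-byte uncache word (paddr(2,1) === 3) cannot be
    // fetched in a single bus beat; the upper halfword is fetched again after
    // a fresh TLB/PMP check on the following word's address.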

    is(m_sendTLB){
      mmio_state := Mux(io.iTLBInter.req.fire, m_tlbResp, m_sendTLB)
    }

    is(m_tlbResp){
      when(io.iTLBInter.resp.fire) {
        // we are using a blocked tlb, so resp.fire must have !resp.bits.miss
        assert(!io.iTLBInter.resp.bits.miss, "blocked mode iTLB miss when resp.fire")
        val tlb_exception = ExceptionType.fromTlbResp(io.iTLBInter.resp.bits)
        // if tlb has an exception, abort checking pmp, just send instr & exception to ibuffer and wait for commit
        mmio_state := Mux(tlb_exception === ExceptionType.none, m_sendPMP, m_waitCommit)
        // also save itlb response
        mmio_resend_addr      := io.iTLBInter.resp.bits.paddr(0)
        mmio_resend_exception := tlb_exception
        mmio_resend_gpaddr    := io.iTLBInter.resp.bits.gpaddr(0)
      }
    }

    is(m_sendPMP){
      // if the pmp re-check does not report mmio, the access must be an access fault
      val pmp_exception = Mux(io.pmp.resp.mmio, ExceptionType.fromPMPResp(io.pmp.resp), ExceptionType.af)
      // if pmp has an exception, abort sending the request, just send instr & exception to ibuffer and wait for commit
      mmio_state := Mux(pmp_exception === ExceptionType.none, m_resendReq, m_waitCommit)
      // also save pmp response
      mmio_resend_exception := pmp_exception
    }

    is(m_resendReq){
      mmio_state := Mux(toUncache.fire, m_waitResendResp, m_resendReq)
    }

    is(m_waitResendResp) {
      when(fromUncache.fire) {
        mmio_state      := m_waitCommit
        f3_mmio_data(1) := fromUncache.bits.data(15, 0)
      }
    }

    is(m_waitCommit) {
      mmio_state := Mux(mmio_commit, m_commited, m_waitCommit)
    }

    // normal mmio instruction
    is(m_commited) {
      mmio_state            := m_idle
      mmio_is_RVC           := false.B
      mmio_resend_addr      := 0.U
      mmio_resend_exception := ExceptionType.none
      mmio_resend_gpaddr    := 0.U
    }
  }

  // Exception or flush by an older branch prediction
  // Condition comes from RegNext(fromFtq.redirect), 1 cycle after the backend redirect
  when(f3_ftq_flush_self || f3_ftq_flush_by_older) {
    mmio_state            := m_idle
    mmio_is_RVC           := false.B
    mmio_resend_addr      := 0.U
    mmio_resend_exception := ExceptionType.none
    mmio_resend_gpaddr    := 0.U
    f3_mmio_data.foreach(_ := 0.U)
  }

  toUncache.valid     := ((mmio_state === m_sendReq) || (mmio_state === m_resendReq)) && f3_req_is_mmio
  toUncache.bits.addr := Mux(mmio_state === m_resendReq, mmio_resend_addr, f3_paddrs(0))
  fromUncache.ready   := true.B

  // send itlb request in m_sendTLB state
  io.iTLBInter.req.valid                   := (mmio_state === m_sendTLB) && f3_req_is_mmio
  io.iTLBInter.req.bits.size               := 3.U
  io.iTLBInter.req.bits.vaddr              := f3_resend_vaddr
  io.iTLBInter.req.bits.debug.pc           := f3_resend_vaddr
  io.iTLBInter.req.bits.cmd                := TlbCmd.exec
  io.iTLBInter.req.bits.kill               := false.B // IFU uses itlb only for mmio, no sync needed, so keep it false
  io.iTLBInter.req.bits.no_translate       := false.B
  io.iTLBInter.req.bits.hyperinst          := DontCare
  io.iTLBInter.req.bits.hlvx               := DontCare
  io.iTLBInter.req.bits.memidx             := DontCare
  io.iTLBInter.req.bits.debug.robIdx       := DontCare
  io.iTLBInter.req.bits.debug.isFirstIssue := DontCare
  io.iTLBInter.req.bits.pmp_addr           := DontCare
  // what's the difference between req_kill and req.bits.kill?
  io.iTLBInter.req_kill := false.B
  // wait for itlb response in m_tlbResp state
  io.iTLBInter.resp.ready := (mmio_state === m_tlbResp) && f3_req_is_mmio

  io.pmp.req.valid     := (mmio_state === m_sendPMP) && f3_req_is_mmio
  io.pmp.req.bits.addr := mmio_resend_addr
  io.pmp.req.bits.size := 3.U
  io.pmp.req.bits.cmd  := TlbCmd.exec

  val f3_lastHalf = RegInit(0.U.asTypeOf(new LastHalfInfo))

  val f3_predecode_range = VecInit(preDecoderOut.pd.map(inst => inst.valid)).asUInt
  val f3_mmio_range      = VecInit((0 until PredictWidth).map(i => if (i == 0) true.B else false.B))
  val f3_instr_valid     = Wire(Vec(PredictWidth, Bool()))

  // Illegal instruction record
  val f3_ill = VecInit((0 until PredictWidth).map{ i =>
    f3_ill_raw(i) && f3_instr_valid(i)
  })
  val f4_instr   = RegEnable(f3_instr, f3_fire)
  val f4_ill     = RegEnable(f3_ill,   f3_fire)
  val illegalBuf = RegInit(0.U(32.W))

  val illBufClear = RegInit(true.B)

  dontTouch(illegalBuf)
  when (f4_ill.asUInt.orR && RegNext(f3_fire) && illBufClear) {
    illegalBuf  := ParallelPriorityMux(f4_ill, f4_instr)
    illBufClear := false.B
  }

  when (backend_redirect || wb_redirect) {
    illBufClear := true.B
  }

  io.illBuf := illegalBuf

  /*** prediction result check ***/
  checkerIn.ftqOffset  := f3_ftq_req.ftqOffset
  checkerIn.jumpOffset := f3_jump_offset
  checkerIn.target     := f3_ftq_req.nextStartAddr
  checkerIn.instrRange := f3_instr_range.asTypeOf(Vec(PredictWidth, Bool()))
  checkerIn.instrValid := f3_instr_valid.asTypeOf(Vec(PredictWidth, Bool()))
  checkerIn.pds        := f3_pd
  checkerIn.pc         := f3_pc
  checkerIn.fire_in    := RegNext(f2_fire, init = false.B)

  /*** handle half RVI in the last 2 Bytes ***/

  def hasLastHalf(idx: UInt) = {
    // !f3_pd(idx).isRVC && checkerOutStage1.fixedRange(idx) && f3_instr_valid(idx) && !checkerOutStage1.fixedTaken(idx) && !checkerOutStage2.fixedMissPred(idx) && !f3_req_is_mmio
    !f3_pd(idx).isRVC && checkerOutStage1.fixedRange(idx) && f3_instr_valid(idx) && !checkerOutStage1.fixedTaken(idx) && !f3_req_is_mmio
  }

  val f3_last_validIdx = ParallelPosteriorityEncoder(checkerOutStage1.fixedRange)

  val f3_hasLastHalf    = hasLastHalf((PredictWidth - 1).U)
  val f3_false_lastHalf = hasLastHalf(f3_last_validIdx)
  val f3_false_snpc     = f3_half_snpc(f3_last_validIdx)
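  // False last half (sketch): f3_last_validIdx is the last slot of the fixed
  // range. If that slot holds the first half of an RVI instruction and it is
  // not the packet's final slot, the predicted fall-through address points
  // into the middle of that instruction; write-back then redirects to
  // f3_false_snpc, the sequential PC just past the complete instruction.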

  val f3_lastHalf_mask    = VecInit((0 until PredictWidth).map(i => if (i == 0) false.B else true.B)).asUInt
  val f3_lastHalf_disable = RegInit(false.B)

  when(f3_flush || (f3_fire && f3_lastHalf_disable)){
    f3_lastHalf_disable := false.B
  }

  when (f3_flush) {
    f3_lastHalf.valid := false.B
  }.elsewhen (f3_fire) {
    f3_lastHalf.valid    := f3_hasLastHalf && !f3_lastHalf_disable
    f3_lastHalf.middlePC := f3_ftq_req.nextStartAddr
  }

  f3_instr_valid := Mux(f3_lastHalf.valid, f3_hasHalfValid, VecInit(f3_pd.map(inst => inst.valid)))

  /*** frontend Trigger ***/
  frontendTrigger.io.pds  := f3_pd
  frontendTrigger.io.pc   := f3_pc
  frontendTrigger.io.data := f3_cut_data

  frontendTrigger.io.frontendTrigger := io.frontendTrigger

  val f3_triggered       = frontendTrigger.io.triggered
  val f3_toIbuffer_valid = f3_valid && (!f3_req_is_mmio || f3_mmio_can_go) && !f3_flush

  /*** send to Ibuffer ***/
  io.toIbuffer.valid          := f3_toIbuffer_valid
  io.toIbuffer.bits.instrs    := f3_expd_instr
  io.toIbuffer.bits.valid     := f3_instr_valid.asUInt
  io.toIbuffer.bits.enqEnable := checkerOutStage1.fixedRange.asUInt & f3_instr_valid.asUInt
  io.toIbuffer.bits.pd        := f3_pd
  io.toIbuffer.bits.ftqPtr    := f3_ftq_req.ftqIdx
  io.toIbuffer.bits.pc        := f3_pc
  io.toIbuffer.bits.ftqOffset.zipWithIndex.foreach{ case (a, i) =>
    a.bits := i.U; a.valid := checkerOutStage1.fixedTaken(i) && !f3_req_is_mmio
  }
  io.toIbuffer.bits.foldpc          := f3_foldpc
  io.toIbuffer.bits.exceptionType   := ExceptionType.merge(f3_exception_vec, f3_crossPage_exception_vec)
  io.toIbuffer.bits.crossPageIPFFix := f3_crossPage_exception_vec.map(_ =/= ExceptionType.none)
  io.toIbuffer.bits.triggered       := f3_triggered

  when(f3_lastHalf.valid){
    io.toIbuffer.bits.enqEnable := checkerOutStage1.fixedRange.asUInt & f3_instr_valid.asUInt & f3_lastHalf_mask
    io.toIbuffer.bits.valid     := f3_lastHalf_mask & f3_instr_valid.asUInt
  }
  /** to backend */
  // f3_gpaddr is valid iff gpf is detected
  io.toBackend.gpaddrMem_wen := f3_toIbuffer_valid && Mux(
    f3_req_is_mmio,
    mmio_resend_exception === ExceptionType.gpf,
    f3_exception.map(_ === ExceptionType.gpf).reduce(_||_)
  )
  io.toBackend.gpaddrMem_waddr := f3_ftq_req.ftqIdx.value
  io.toBackend.gpaddrMem_wdata := Mux(f3_req_is_mmio, mmio_resend_gpaddr, f3_gpaddr)

  // Write back to Ftq
  val f3_cache_fetch     = f3_valid && !(f2_fire && !f2_flush)
  val finishFetchMaskReg = RegNext(f3_cache_fetch)

  val mmioFlushWb        = Wire(Valid(new PredecodeWritebackBundle))
  val f3_mmio_missOffset = Wire(ValidUndirectioned(UInt(log2Ceil(PredictWidth).W)))
  f3_mmio_missOffset.valid := f3_req_is_mmio
  f3_mmio_missOffset.bits  := 0.U

  // Send mmioFlushWb back to FTQ 1 cycle after the uncache fetch returns.
  // When the backend redirects, mmio_state resets after 1 cycle.
  // In this case, mask .valid to avoid overriding the backend redirect.
  mmioFlushWb.valid := (f3_req_is_mmio && mmio_state === m_waitCommit && RegNext(fromUncache.fire) &&
    f3_mmio_use_seq_pc && !f3_ftq_flush_self && !f3_ftq_flush_by_older)
  mmioFlushWb.bits.pc := f3_pc
  mmioFlushWb.bits.pd := f3_pd
  mmioFlushWb.bits.pd.zipWithIndex.foreach{ case (instr, i) => instr.valid := f3_mmio_range(i) }
  mmioFlushWb.bits.ftqIdx     := f3_ftq_req.ftqIdx
  mmioFlushWb.bits.ftqOffset  := f3_ftq_req.ftqOffset.bits
  mmioFlushWb.bits.misOffset  := f3_mmio_missOffset
  mmioFlushWb.bits.cfiOffset  := DontCare
  mmioFlushWb.bits.target     := Mux(mmio_is_RVC, f3_ftq_req.startAddr + 2.U, f3_ftq_req.startAddr + 4.U)
  mmioFlushWb.bits.jalTarget  := DontCare
  mmioFlushWb.bits.instrRange := f3_mmio_range

  /** external predecode for MMIO instruction */
  when(f3_req_is_mmio){
    val inst         = Cat(f3_mmio_data(1), f3_mmio_data(0))
    val currentIsRVC = isRVC(inst)

    val brType :: isCall :: isRet :: Nil = brInfo(inst)
    val jalOffset = jal_offset(inst, currentIsRVC)
    val brOffset  = br_offset(inst, currentIsRVC)

    io.toIbuffer.bits.instrs(0) := new RVCDecoder(inst, XLEN, fLen, useAddiForMv = true).decode.bits


    io.toIbuffer.bits.pd(0).valid  := true.B
    io.toIbuffer.bits.pd(0).isRVC  := currentIsRVC
    io.toIbuffer.bits.pd(0).brType := brType
    io.toIbuffer.bits.pd(0).isCall := isCall
    io.toIbuffer.bits.pd(0).isRet  := isRet

    io.toIbuffer.bits.exceptionType(0)   := mmio_resend_exception
    io.toIbuffer.bits.crossPageIPFFix(0) := mmio_resend_exception =/= ExceptionType.none

    io.toIbuffer.bits.enqEnable := f3_mmio_range.asUInt

    mmioFlushWb.bits.pd(0).valid  := true.B
    mmioFlushWb.bits.pd(0).isRVC  := currentIsRVC
    mmioFlushWb.bits.pd(0).brType := brType
    mmioFlushWb.bits.pd(0).isCall := isCall
    mmioFlushWb.bits.pd(0).isRet  := isRet
  }

  mmio_redirect := (f3_req_is_mmio && mmio_state === m_waitCommit && RegNext(fromUncache.fire) && f3_mmio_use_seq_pc)

  XSPerfAccumulate("fetch_bubble_ibuffer_not_ready", io.toIbuffer.valid && !io.toIbuffer.ready)


  /**
    ******************************************************************************
    * IFU Write Back Stage
    * - write back predecode information to Ftq to update its entry
    * - redirect if a fault prediction is found
    * - redirect on a false-hit last half (the last PC is not start + 32 Bytes,
    *   but in the middle of a non-CFI RVI instruction)
    ******************************************************************************
    */
  val wb_enable  = RegNext(f2_fire && !f2_flush) && !f3_req_is_mmio && !f3_flush
  val wb_valid   = RegNext(wb_enable, init = false.B)
  val wb_ftq_req = RegEnable(f3_ftq_req, wb_enable)

  val wb_check_result_stage1 = RegEnable(checkerOutStage1, wb_enable)
  val wb_check_result_stage2 = checkerOutStage2
  val wb_instr_range         = RegEnable(io.toIbuffer.bits.enqEnable, wb_enable)

  val wb_pc_lower_result = RegEnable(f3_pc_lower_result, wb_enable)
  val wb_pc_high         = RegEnable(f3_pc_high,         wb_enable)
  val wb_pc_high_plus1   = RegEnable(f3_pc_high_plus1,   wb_enable)
  val wb_pc              = CatPC(wb_pc_lower_result, wb_pc_high, wb_pc_high_plus1)

  // val wb_pc          = RegEnable(f3_pc, wb_enable)
  val wb_pd          = RegEnable(f3_pd, wb_enable)
  val wb_instr_valid = RegEnable(f3_instr_valid, wb_enable)

  /* false hit lastHalf */
  val wb_lastIdx        = RegEnable(f3_last_validIdx, wb_enable)
  val wb_false_lastHalf = RegEnable(f3_false_lastHalf, wb_enable) && wb_lastIdx =/= (PredictWidth - 1).U
  val wb_false_target   = RegEnable(f3_false_snpc, wb_enable)

  val wb_half_flush  = wb_false_lastHalf
  val wb_half_target = wb_false_target

  /* false oversize */
  val lastIsRVC = wb_instr_range.asTypeOf(Vec(PredictWidth, Bool())).last && wb_pd.last.isRVC
  val lastIsRVI = wb_instr_range.asTypeOf(Vec(PredictWidth, Bool()))(PredictWidth - 2) && !wb_pd(PredictWidth - 2).isRVC
  val lastTaken = wb_check_result_stage1.fixedTaken.last

  f3_wb_not_flush := wb_ftq_req.ftqIdx === f3_ftq_req.ftqIdx && f3_valid && wb_valid

  /** if a request with a last half that was mispredicted enters the wb stage and
    * f3 stalls this cycle, set a flag to notify f3 that the last-half flag need
    * not be set.
    */
  // f3_fire comes after wb_valid
  when(wb_valid && RegNext(f3_hasLastHalf, init = false.B)
        && wb_check_result_stage2.fixedMissPred(PredictWidth - 1) && !f3_fire && !RegNext(f3_fire, init = false.B) && !f3_flush
      ){
    f3_lastHalf_disable := true.B
  }

  // wb_valid and f3_fire are in the same cycle
  when(wb_valid && RegNext(f3_hasLastHalf, init = false.B)
        && wb_check_result_stage2.fixedMissPred(PredictWidth - 1) && f3_fire
      ){
    f3_lastHalf.valid := false.B
  }
  val checkFlushWb = Wire(Valid(new PredecodeWritebackBundle))
  val checkFlushWbjalTargetIdx = ParallelPriorityEncoder(VecInit(wb_pd.zip(wb_instr_valid).map{ case (pd, v) => v && pd.isJal }))
  val checkFlushWbTargetIdx    = ParallelPriorityEncoder(wb_check_result_stage2.fixedMissPred)
  checkFlushWb.valid   := wb_valid
  checkFlushWb.bits.pc := wb_pc
  checkFlushWb.bits.pd := wb_pd
  checkFlushWb.bits.pd.zipWithIndex.foreach{ case (instr, i) => instr.valid := wb_instr_valid(i) }
  checkFlushWb.bits.ftqIdx          := wb_ftq_req.ftqIdx
  checkFlushWb.bits.ftqOffset       := wb_ftq_req.ftqOffset.bits
  checkFlushWb.bits.misOffset.valid := ParallelOR(wb_check_result_stage2.fixedMissPred) || wb_half_flush
  checkFlushWb.bits.misOffset.bits  := Mux(wb_half_flush, wb_lastIdx, ParallelPriorityEncoder(wb_check_result_stage2.fixedMissPred))
  checkFlushWb.bits.cfiOffset.valid := ParallelOR(wb_check_result_stage1.fixedTaken)
  checkFlushWb.bits.cfiOffset.bits  := ParallelPriorityEncoder(wb_check_result_stage1.fixedTaken)
  checkFlushWb.bits.target          := Mux(wb_half_flush, wb_half_target, wb_check_result_stage2.fixedTarget(checkFlushWbTargetIdx))
  checkFlushWb.bits.jalTarget       := wb_check_result_stage2.jalTarget(checkFlushWbjalTargetIdx)
  checkFlushWb.bits.instrRange      := wb_instr_range.asTypeOf(Vec(PredictWidth, Bool()))

  toFtq.pdWb := Mux(wb_valid, checkFlushWb, mmioFlushWb)

  wb_redirect := checkFlushWb.bits.misOffset.valid && wb_valid

  /* write back flush type */
  val checkFaultType    = wb_check_result_stage2.faultType
  val checkJalFault     = wb_valid && checkFaultType.map(_.isjalFault).reduce(_||_)
  val checkRetFault     = wb_valid && checkFaultType.map(_.isRetFault).reduce(_||_)
  val checkTargetFault  = wb_valid && checkFaultType.map(_.istargetFault).reduce(_||_)
  val checkNotCFIFault  = wb_valid && checkFaultType.map(_.notCFIFault).reduce(_||_)
  val checkInvalidTaken = wb_valid && checkFaultType.map(_.invalidTakenFault).reduce(_||_)


  XSPerfAccumulate("predecode_flush_jalFault",          checkJalFault)
  XSPerfAccumulate("predecode_flush_retFault",          checkRetFault)
  XSPerfAccumulate("predecode_flush_targetFault",       checkTargetFault)
  XSPerfAccumulate("predecode_flush_notCFIFault",       checkNotCFIFault)
  XSPerfAccumulate("predecode_flush_invalidTakenFault", checkInvalidTaken)

  when(checkRetFault){
    XSDebug("startAddr:%x  nextStartAddr:%x  taken:%d    takenIdx:%d\n",
        wb_ftq_req.startAddr, wb_ftq_req.nextStartAddr, wb_ftq_req.ftqOffset.valid, wb_ftq_req.ftqOffset.bits)
  }


  /** performance counter */
  val f3_perf_info = RegEnable(f2_perf_info, f2_fire)
  val f3_req_0     = io.toIbuffer.fire
  val f3_req_1     = io.toIbuffer.fire && f3_doubleLine
  val f3_hit_0     = io.toIbuffer.fire && f3_perf_info.bank_hit(0)
  val f3_hit_1     = io.toIbuffer.fire && f3_doubleLine && f3_perf_info.bank_hit(1)
  val f3_hit       = f3_perf_info.hit
  val perfEvents = Seq(
    ("frontendFlush                ", wb_redirect                                     ),
    ("ifu_req                      ", io.toIbuffer.fire                               ),
    ("ifu_miss                     ", io.toIbuffer.fire && !f3_perf_info.hit          ),
    ("ifu_req_cacheline_0          ", f3_req_0                                        ),
    ("ifu_req_cacheline_1          ", f3_req_1                                        ),
    ("ifu_req_cacheline_0_hit      ", f3_hit_0                                        ),
    ("ifu_req_cacheline_1_hit      ", f3_hit_1                                        ),
    ("only_0_hit                   ", f3_perf_info.only_0_hit    && io.toIbuffer.fire ),
    ("only_0_miss                  ", f3_perf_info.only_0_miss   && io.toIbuffer.fire ),
    ("hit_0_hit_1                  ", f3_perf_info.hit_0_hit_1   && io.toIbuffer.fire ),
    ("hit_0_miss_1                 ", f3_perf_info.hit_0_miss_1  && io.toIbuffer.fire ),
    ("miss_0_hit_1                 ", f3_perf_info.miss_0_hit_1  && io.toIbuffer.fire ),
    ("miss_0_miss_1                ", f3_perf_info.miss_0_miss_1 && io.toIbuffer.fire ),
  )
  generatePerfEvent()

  XSPerfAccumulate("ifu_req",  io.toIbuffer.fire)
  XSPerfAccumulate("ifu_miss", io.toIbuffer.fire && !f3_hit)
  XSPerfAccumulate("ifu_req_cacheline_0",     f3_req_0)
  XSPerfAccumulate("ifu_req_cacheline_1",     f3_req_1)
  XSPerfAccumulate("ifu_req_cacheline_0_hit", f3_hit_0)
  XSPerfAccumulate("ifu_req_cacheline_1_hit", f3_hit_1)
  XSPerfAccumulate("frontendFlush", wb_redirect)
  XSPerfAccumulate("only_0_hit",      f3_perf_info.only_0_hit      && io.toIbuffer.fire)
  XSPerfAccumulate("only_0_miss",     f3_perf_info.only_0_miss     && io.toIbuffer.fire)
  XSPerfAccumulate("hit_0_hit_1",     f3_perf_info.hit_0_hit_1     && io.toIbuffer.fire)
  XSPerfAccumulate("hit_0_miss_1",    f3_perf_info.hit_0_miss_1    && io.toIbuffer.fire)
  XSPerfAccumulate("miss_0_hit_1",    f3_perf_info.miss_0_hit_1    && io.toIbuffer.fire)
  XSPerfAccumulate("miss_0_miss_1",   f3_perf_info.miss_0_miss_1   && io.toIbuffer.fire)
  XSPerfAccumulate("hit_0_except_1",  f3_perf_info.hit_0_except_1  && io.toIbuffer.fire)
  XSPerfAccumulate("miss_0_except_1", f3_perf_info.miss_0_except_1 && io.toIbuffer.fire)
  XSPerfAccumulate("except_0",        f3_perf_info.except_0        && io.toIbuffer.fire)
  XSPerfHistogram("ifu2ibuffer_validCnt", PopCount(io.toIbuffer.bits.valid & io.toIbuffer.bits.enqEnable), io.toIbuffer.fire, 0, PredictWidth + 1, 1)

  val hartId = p(XSCoreParamsKey).HartId
  val isWriteFetchToIBufferTable = Constantin.createRecord(s"isWriteFetchToIBufferTable$hartId")
  val isWriteIfuWbToFtqTable     = Constantin.createRecord(s"isWriteIfuWbToFtqTable$hartId")
  val fetchToIBufferTable = ChiselDB.createTable(s"FetchToIBuffer$hartId", new FetchToIBufferDB)
  val ifuWbToFtqTable     = ChiselDB.createTable(s"IfuWbToFtq$hartId", new IfuWbToFtqDB)

  val fetchIBufferDumpData = Wire(new FetchToIBufferDB)
  fetchIBufferDumpData.start_addr   := f3_ftq_req.startAddr
  fetchIBufferDumpData.instr_count  := PopCount(io.toIbuffer.bits.enqEnable)
  fetchIBufferDumpData.exception    := (f3_perf_info.except_0 && io.toIbuffer.fire) ||
    (f3_perf_info.hit_0_except_1 && io.toIbuffer.fire) || (f3_perf_info.miss_0_except_1 && io.toIbuffer.fire)
  fetchIBufferDumpData.is_cache_hit := f3_hit

  val ifuWbToFtqDumpData = Wire(new IfuWbToFtqDB)
  ifuWbToFtqDumpData.start_addr        := wb_ftq_req.startAddr
  ifuWbToFtqDumpData.is_miss_pred      := checkFlushWb.bits.misOffset.valid
  ifuWbToFtqDumpData.miss_pred_offset  := checkFlushWb.bits.misOffset.bits
  ifuWbToFtqDumpData.checkJalFault     := checkJalFault
  ifuWbToFtqDumpData.checkRetFault     := checkRetFault
  ifuWbToFtqDumpData.checkTargetFault  := checkTargetFault
  ifuWbToFtqDumpData.checkNotCFIFault  := checkNotCFIFault
  ifuWbToFtqDumpData.checkInvalidTaken := checkInvalidTaken

  fetchToIBufferTable.log(
    data  = fetchIBufferDumpData,
    en    = isWriteFetchToIBufferTable.orR && io.toIbuffer.fire,
    site  = "IFU" + p(XSCoreParamsKey).HartId.toString,
    clock = clock,
    reset = reset
  )
  ifuWbToFtqTable.log(
    data  = ifuWbToFtqDumpData,
    en    = isWriteIfuWbToFtqTable.orR && checkFlushWb.valid,
    site  = "IFU" + p(XSCoreParamsKey).HartId.toString,
    clock = clock,
    reset = reset
  )

}