xref: /XiangShan/src/main/scala/xiangshan/frontend/IFU.scala (revision 0214776e1846ec238c2bf5c1a4aae2deadc45e8a)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.RVCDecoder
import xiangshan._
import xiangshan.cache.mmu._
import xiangshan.frontend.icache._
import utils._
import utility._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}
import utility.ChiselDB

trait HasInstrMMIOConst extends HasXSParameter with HasIFUConst{
  def mmioBusWidth = 64
  def mmioBusBytes = mmioBusWidth / 8
  def maxInstrLen = 32
}

trait HasIFUConst extends HasXSParameter{
  def addrAlign(addr: UInt, bytes: Int, highest: Int): UInt = Cat(addr(highest-1, log2Ceil(bytes)), 0.U(log2Ceil(bytes).W))
  def fetchQueueSize = 2

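  // getBasicBlockIdx maps an address inside a fetch block to an instruction-slot
  // index relative to `start`: (pc - start - instBytes) >> instOffsetBits.
  // A worked example, assuming RVC is enabled (instBytes = 2, instOffsetBits = 1):
  //   start = 0x1000, pc = 0x1010  =>  (0x10 - 2) >> 1 = 7,
  // i.e. the index of the last 2-byte slot before pc. Stage 2 uses this to turn
  // nextStartAddr into a fetch-range mask.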
  def getBasicBlockIdx(pc: UInt, start: UInt): UInt = {
    val byteOffset = pc - start
    (byteOffset - instBytes.U)(log2Ceil(PredictWidth), instOffsetBits)
  }
}

class IfuToFtqIO(implicit p: Parameters) extends XSBundle {
  val pdWb = Valid(new PredecodeWritebackBundle)
}

class FtqInterface(implicit p: Parameters) extends XSBundle {
  val fromFtq = Flipped(new FtqToIfuIO)
  val toFtq   = new IfuToFtqIO
}

class UncacheInterface(implicit p: Parameters) extends XSBundle {
  val fromUncache = Flipped(DecoupledIO(new InsUncacheResp))
  val toUncache   = DecoupledIO(new InsUncacheReq)
}

class NewIFUIO(implicit p: Parameters) extends XSBundle {
  val ftqInter         = new FtqInterface
  val icacheInter      = Flipped(new IFUICacheIO)
  val icacheStop       = Output(Bool())
  val icachePerfInfo   = Input(new ICachePerfInfo)
  val toIbuffer        = Decoupled(new FetchToIBuffer)
  val uncacheInter     = new UncacheInterface
  val frontendTrigger  = Flipped(new FrontendTdataDistributeIO)
  val csrTriggerEnable = Input(Vec(4, Bool()))
  val rob_commits      = Flipped(Vec(CommitWidth, Valid(new RobCommitInfo)))
  val iTLBInter        = new TlbRequestIO
  val pmp              = new ICachePMPBundle
  val mmioCommitRead   = new mmioCommitRead
}

// record the situation in which fallThruAddr falls into
// the middle of an RVI inst
class LastHalfInfo(implicit p: Parameters) extends XSBundle {
  val valid = Bool()
  val middlePC = UInt(VAddrBits.W)
  def matchThisBlock(startAddr: UInt) = valid && middlePC === startAddr
}

class IfuToPreDecode(implicit p: Parameters) extends XSBundle {
  val data             = if (HasCExtension) Vec(PredictWidth + 1, UInt(16.W)) else Vec(PredictWidth, UInt(32.W))
  val frontendTrigger  = new FrontendTdataDistributeIO
  val csrTriggerEnable = Vec(4, Bool())
  val pc               = Vec(PredictWidth, UInt(VAddrBits.W))
}


class IfuToPredChecker(implicit p: Parameters) extends XSBundle {
  val ftqOffset  = Valid(UInt(log2Ceil(PredictWidth).W))
  val jumpOffset = Vec(PredictWidth, UInt(XLEN.W))
  val target     = UInt(VAddrBits.W)
  val instrRange = Vec(PredictWidth, Bool())
  val instrValid = Vec(PredictWidth, Bool())
  val pds        = Vec(PredictWidth, new PreDecodeInfo)
  val pc         = Vec(PredictWidth, UInt(VAddrBits.W))
}

class FetchToIBufferDB extends Bundle {
  val start_addr = UInt(39.W)
  val instr_count = UInt(32.W)
  val exception = Bool()
  val is_cache_hit = Bool()
}

class IfuWbToFtqDB extends Bundle {
  val start_addr = UInt(39.W)
  val is_miss_pred = Bool()
  val miss_pred_offset = UInt(32.W)
  val checkJalFault = Bool()
  val checkRetFault = Bool()
  val checkTargetFault = Bool()
  val checkNotCFIFault = Bool()
  val checkInvalidTaken = Bool()
}

class NewIFU(implicit p: Parameters) extends XSModule
  with HasICacheParameters
  with HasIFUConst
  with HasPdConst
  with HasCircularQueuePtrHelper
  with HasPerfEvents
{
  val io = IO(new NewIFUIO)
  val (toFtq, fromFtq)         = (io.ftqInter.toFtq, io.ftqInter.fromFtq)
  val fromICache               = io.icacheInter.resp
  val (toUncache, fromUncache) = (io.uncacheInter.toUncache, io.uncacheInter.fromUncache)

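  // A fetch request spans two cachelines exactly when the cacheline-index bit
  // (bit blockOffBits) of its start and end addresses differ.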
  def isCrossLineReq(start: UInt, end: UInt): Bool = start(blockOffBits) ^ end(blockOffBits)

  def isLastInCacheline(addr: UInt): Bool = addr(blockOffBits - 1, 1) === 0.U

  def numOfStage = 3
  require(numOfStage > 1, "IFU numOfStage must be greater than 1")
  val topdown_stages = RegInit(VecInit(Seq.fill(numOfStage)(0.U.asTypeOf(new FrontendTopDownBundle))))
  // bubble events in IFU, only happen in stage 1
  val icacheMissBubble = Wire(Bool())
  val itlbMissBubble   = Wire(Bool())

  // only driven by clock, not valid-ready
  topdown_stages(0) := fromFtq.req.bits.topdown_info
  for (i <- 1 until numOfStage) {
    topdown_stages(i) := topdown_stages(i - 1)
  }
  when (icacheMissBubble) {
    topdown_stages(1).reasons(TopDownCounters.ICacheMissBubble.id) := true.B
  }
  when (itlbMissBubble) {
    topdown_stages(1).reasons(TopDownCounters.ITLBMissBubble.id) := true.B
  }
  io.toIbuffer.bits.topdown_info := topdown_stages(numOfStage - 1)
  when (fromFtq.topdown_redirect.valid) {
    // only redirect from backend, IFU redirect itself is handled elsewhere
    when (fromFtq.topdown_redirect.bits.debugIsCtrl) {
      /*
      for (i <- 0 until numOfStage) {
        topdown_stages(i).reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
      }
      io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
      */
      when (fromFtq.topdown_redirect.bits.ControlBTBMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.BTBMissBubble.id) := true.B
      } .elsewhen (fromFtq.topdown_redirect.bits.TAGEMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.TAGEMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.TAGEMissBubble.id) := true.B
      } .elsewhen (fromFtq.topdown_redirect.bits.SCMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.SCMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.SCMissBubble.id) := true.B
      } .elsewhen (fromFtq.topdown_redirect.bits.ITTAGEMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
      } .elsewhen (fromFtq.topdown_redirect.bits.RASMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.RASMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.RASMissBubble.id) := true.B
      }
    } .elsewhen (fromFtq.topdown_redirect.bits.debugIsMemVio) {
      for (i <- 0 until numOfStage) {
        topdown_stages(i).reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
      }
      io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
    } .otherwise {
      for (i <- 0 until numOfStage) {
        topdown_stages(i).reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
      }
      io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
    }
  }

  class TlbExept(implicit p: Parameters) extends XSBundle{
    val pageFault = Bool()
    val accessFault = Bool()
    val mmio = Bool()
  }

  val preDecoder      = Module(new PreDecode)

  val predChecker     = Module(new PredChecker)
  val frontendTrigger = Module(new FrontendTrigger)
  val (checkerIn, checkerOutStage1, checkerOutStage2) = (predChecker.io.in, predChecker.io.out.stage1Out, predChecker.io.out.stage2Out)

  io.iTLBInter.req_kill := false.B
  io.iTLBInter.resp.ready := true.B

  /**
    ******************************************************************************
    * IFU Stage 0
    * - send cacheline fetch request to ICacheMainPipe
    ******************************************************************************
    */

  val f0_valid      = fromFtq.req.valid
  val f0_ftq_req    = fromFtq.req.bits
  val f0_doubleLine = fromFtq.req.bits.crossCacheline
  val f0_vSetIdx    = VecInit(get_idx(f0_ftq_req.startAddr), get_idx(f0_ftq_req.nextlineStart))
  val f0_fire       = fromFtq.req.fire

  val f0_flush, f1_flush, f2_flush, f3_flush = WireInit(false.B)
  val from_bpu_f0_flush, from_bpu_f1_flush, from_bpu_f2_flush, from_bpu_f3_flush = WireInit(false.B)

  from_bpu_f0_flush := fromFtq.flushFromBpu.shouldFlushByStage2(f0_ftq_req.ftqIdx) ||
                       fromFtq.flushFromBpu.shouldFlushByStage3(f0_ftq_req.ftqIdx)

  val wb_redirect, mmio_redirect, backend_redirect = WireInit(false.B)
  val f3_wb_not_flush = WireInit(false.B)

  backend_redirect := fromFtq.redirect.valid
  f3_flush := backend_redirect || (wb_redirect && !f3_wb_not_flush)
  f2_flush := backend_redirect || mmio_redirect || wb_redirect
  f1_flush := f2_flush || from_bpu_f1_flush
  f0_flush := f1_flush || from_bpu_f0_flush

  val f1_ready, f2_ready, f3_ready = WireInit(false.B)

  fromFtq.req.ready := f1_ready && io.icacheInter.icacheReady


  when (wb_redirect) {
    when (f3_wb_not_flush) {
      topdown_stages(2).reasons(TopDownCounters.BTBMissBubble.id) := true.B
    }
    for (i <- 0 until numOfStage - 1) {
      topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id) := true.B
    }
  }

  /** <PERF> f0 fetch bubble */

  XSPerfAccumulate("fetch_bubble_ftq_not_valid", !fromFtq.req.valid && fromFtq.req.ready)
  // XSPerfAccumulate("fetch_bubble_pipe_stall",    f0_valid && toICache(0).ready && toICache(1).ready && !f1_ready )
  // XSPerfAccumulate("fetch_bubble_icache_0_busy",   f0_valid && !toICache(0).ready  )
  // XSPerfAccumulate("fetch_bubble_icache_1_busy",   f0_valid && !toICache(1).ready  )
  XSPerfAccumulate("fetch_flush_backend_redirect", backend_redirect)
  XSPerfAccumulate("fetch_flush_wb_redirect",      wb_redirect)
  XSPerfAccumulate("fetch_flush_bpu_f1_flush",     from_bpu_f1_flush)
  XSPerfAccumulate("fetch_flush_bpu_f0_flush",     from_bpu_f0_flush)


  /**
    ******************************************************************************
    * IFU Stage 1
    * - calculate pc/half_pc/cut_ptr for every instruction
    ******************************************************************************
    */

  val f1_valid      = RegInit(false.B)
  val f1_ftq_req    = RegEnable(f0_ftq_req,    f0_fire)
  // val f1_situation  = RegEnable(f0_situation,  f0_fire)
  val f1_doubleLine = RegEnable(f0_doubleLine, f0_fire)
  val f1_vSetIdx    = RegEnable(f0_vSetIdx,    f0_fire)
  val f1_fire       = f1_valid && f2_ready

  f1_ready := f1_fire || !f1_valid

  from_bpu_f1_flush := fromFtq.flushFromBpu.shouldFlushByStage3(f1_ftq_req.ftqIdx) && f1_valid
  // from_bpu_f1_flush := false.B

  when(f1_flush)                  {f1_valid := false.B}
  .elsewhen(f0_fire && !f0_flush) {f1_valid := true.B}
  .elsewhen(f1_fire)              {f1_valid := false.B}

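  // PC generation is split like a carry-select adder to shorten the critical path:
  // the per-slot adders only cover the low (VAddrBits/2 - 1) bits plus one overflow
  // bit, and the high part is selected between f1_pc_high and f1_pc_high_plus1 by
  // that overflow bit instead of being recomputed for every slot.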
  val f1_pc_adder_cut_point = (VAddrBits/2) - 1 // equals the lower_result overflow bit
  val f1_pc_high            = f1_ftq_req.startAddr(VAddrBits-1, f1_pc_adder_cut_point)
  val f1_pc_high_plus1      = f1_pc_high + 1.U

  val f1_pc_lower_result    = VecInit((0 until PredictWidth).map(i => Cat(0.U(1.W), f1_ftq_req.startAddr(f1_pc_adder_cut_point-1, 0)) + (i * 2).U)) // cat with overflow bit
  val f1_pc                 = VecInit(f1_pc_lower_result.map{ i =>
    Mux(i(f1_pc_adder_cut_point), Cat(f1_pc_high_plus1, i(f1_pc_adder_cut_point-1, 0)), Cat(f1_pc_high, i(f1_pc_adder_cut_point-1, 0)))})

  val f1_half_snpc_lower_result = VecInit((0 until PredictWidth).map(i => Cat(0.U(1.W), f1_ftq_req.startAddr(f1_pc_adder_cut_point-1, 0)) + ((i+2) * 2).U)) // cat with overflow bit
  val f1_half_snpc          = VecInit(f1_half_snpc_lower_result.map{ i =>
    Mux(i(f1_pc_adder_cut_point), Cat(f1_pc_high_plus1, i(f1_pc_adder_cut_point-1, 0)), Cat(f1_pc_high, i(f1_pc_adder_cut_point-1, 0)))})

  if (env.FPGAPlatform){
    val f1_pc_diff          = VecInit((0 until PredictWidth).map(i => f1_ftq_req.startAddr + (i * 2).U))
    val f1_half_snpc_diff   = VecInit((0 until PredictWidth).map(i => f1_ftq_req.startAddr + ((i+2) * 2).U))

    XSError(f1_pc.zip(f1_pc_diff).map{ case (a,b) => a.asUInt =/= b.asUInt }.reduce(_||_), "f1_pc adder cut fail")
    XSError(f1_half_snpc.zip(f1_half_snpc_diff).map{ case (a,b) => a.asUInt =/= b.asUInt }.reduce(_||_), "f1_half_snpc adder cut fail")
  }

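  // f1_cut_ptr gives, for each instruction slot, the index of the 16-bit chunk
  // (within the concatenated cacheline data) where that slot's code starts: the
  // base chunk comes from the start address's in-line offset, plus i per slot.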
  val f1_cut_ptr = if (HasCExtension) VecInit((0 until PredictWidth + 1).map(i => Cat(0.U(2.W), f1_ftq_req.startAddr(blockOffBits-2, 1)) + i.U))
                   else               VecInit((0 until PredictWidth).map(i =>     Cat(0.U(2.W), f1_ftq_req.startAddr(blockOffBits-2, 2)) + i.U))

  /**
    ******************************************************************************
    * IFU Stage 2
    * - icache response data (latched for pipeline stop)
    * - generate exception bits for every instruction (page fault/access fault/mmio)
    * - generate predicted instruction range (1 means this instruction is in this fetch packet)
    * - cut data from cachelines into the packet's instruction code
    * - instruction predecode and RVC expand
    ******************************************************************************
    */

  val icacheRespAllValid = WireInit(false.B)

  val f2_valid      = RegInit(false.B)
  val f2_ftq_req    = RegEnable(f1_ftq_req,    f1_fire)
  // val f2_situation  = RegEnable(f1_situation,  f1_fire)
  val f2_doubleLine = RegEnable(f1_doubleLine, f1_fire)
  val f2_vSetIdx    = RegEnable(f1_vSetIdx,    f1_fire)
  val f2_fire       = f2_valid && f3_ready && icacheRespAllValid

  f2_ready := f2_fire || !f2_valid
  //TODO: addr compare may be timing critical
  val f2_icache_all_resp_wire = fromICache(0).valid && (fromICache(0).bits.vaddr === f2_ftq_req.startAddr) && ((fromICache(1).valid && (fromICache(1).bits.vaddr === f2_ftq_req.nextlineStart)) || !f2_doubleLine)
  val f2_icache_all_resp_reg  = RegInit(false.B)

  icacheRespAllValid := f2_icache_all_resp_reg || f2_icache_all_resp_wire

  icacheMissBubble := io.icacheInter.topdownIcacheMiss
  itlbMissBubble   := io.icacheInter.topdownItlbMiss

  io.icacheStop := !f3_ready

  when(f2_flush)                                              {f2_icache_all_resp_reg := false.B}
  .elsewhen(f2_valid && f2_icache_all_resp_wire && !f3_ready) {f2_icache_all_resp_reg := true.B}
  .elsewhen(f2_fire && f2_icache_all_resp_reg)                {f2_icache_all_resp_reg := false.B}

  when(f2_flush)                  {f2_valid := false.B}
  .elsewhen(f1_fire && !f1_flush) {f2_valid := true.B }
  .elsewhen(f2_fire)              {f2_valid := false.B}

  val f2_except_pf  = VecInit((0 until PortNumber).map(i => fromICache(i).bits.tlbExcp.pageFault))
  val f2_except_gpf = VecInit((0 until PortNumber).map(i => fromICache(i).bits.tlbExcp.guestPageFault))
  val f2_except_af  = VecInit((0 until PortNumber).map(i => fromICache(i).bits.tlbExcp.accessFault))
  val f2_gpaddrs    = VecInit((0 until PortNumber).map(i => fromICache(i).bits.gpaddr))
  val f2_mmio       = fromICache(0).bits.tlbExcp.mmio &&
    !fromICache(0).bits.tlbExcp.accessFault &&
    !fromICache(0).bits.tlbExcp.pageFault   &&
    !fromICache(0).bits.tlbExcp.guestPageFault

  val f2_pc        = RegEnable(f1_pc,        f1_fire)
  val f2_half_snpc = RegEnable(f1_half_snpc, f1_fire)
  val f2_cut_ptr   = RegEnable(f1_cut_ptr,   f1_fire)

  val f2_resend_vaddr = RegEnable(f1_ftq_req.startAddr + 2.U, f1_fire)

  def isNextLine(pc: UInt, startAddr: UInt) = {
    startAddr(blockOffBits) ^ pc(blockOffBits)
  }

  def isLastInLine(pc: UInt) = {
    pc(blockOffBits - 1, 0) === "b111110".U
  }

  val f2_foldpc = VecInit(f2_pc.map(i => XORFold(i(VAddrBits-1, 1), MemPredPCWidth)))
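  // Both range masks below use the pattern ones(PredictWidth) >> ~x, which keeps
  // the low x+1 bits: f2_jump_range cuts the block after the predicted taken
  // offset, and f2_ftr_range cuts it at the predicted fall-through address.
  // A worked example, assuming PredictWidth = 16: startAddr = 0x1000 and
  // nextStartAddr = 0x1010 give getBasicBlockIdx = (0x10 - 2) >> 1 = 7, so
  // f2_ftr_range = 0x00ff, i.e. the eight 2-byte slots of this fetch block.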
  val f2_jump_range  = Fill(PredictWidth, !f2_ftq_req.ftqOffset.valid) | Fill(PredictWidth, 1.U(1.W)) >> ~f2_ftq_req.ftqOffset.bits
  val f2_ftr_range   = Fill(PredictWidth,  f2_ftq_req.ftqOffset.valid) | Fill(PredictWidth, 1.U(1.W)) >> ~getBasicBlockIdx(f2_ftq_req.nextStartAddr, f2_ftq_req.startAddr)
  val f2_instr_range = f2_jump_range & f2_ftr_range
  val f2_pf_vec      = VecInit((0 until PredictWidth).map(i => (!isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_except_pf(0)  || isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_doubleLine && f2_except_pf(1))))
  val f2_af_vec      = VecInit((0 until PredictWidth).map(i => (!isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_except_af(0)  || isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_doubleLine && f2_except_af(1))))
  val f2_gpf_vec     = VecInit((0 until PredictWidth).map(i => (!isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_except_gpf(0) || isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_doubleLine && f2_except_gpf(1))))
  val f2_gpaddrs_vec = VecInit((0 until PredictWidth).map(i => Mux(!isNextLine(f2_pc(i), f2_ftq_req.startAddr), f2_gpaddrs(0), Mux(isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_doubleLine, f2_gpaddrs(1), 0.U(GPAddrBits.W)))))
  val f2_paddrs      = VecInit((0 until PortNumber).map(i => fromICache(i).bits.paddr))
  val f2_perf_info   = io.icachePerfInfo

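  // cut() selects PredictWidth + 1 16-bit chunks out of the concatenated cacheline
  // data using the per-slot pointers from f1, so a fetch block starting near the
  // end of a line transparently reads its tail from the next line; the extra
  // chunk lets an RVI instruction straddling the block end grab its upper half.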
  def cut(cacheline: UInt, cutPtr: Vec[UInt]): Vec[UInt] = {
    require(HasCExtension)
    // if(HasCExtension){
      val result  = Wire(Vec(PredictWidth + 1, UInt(16.W)))
      val dataVec = cacheline.asTypeOf(Vec(blockBytes/2, UInt(16.W))) // 32 16-bit data vector
      (0 until PredictWidth + 1).foreach( i =>
        result(i) := dataVec(cutPtr(i)) // the max ptr is 3*blockBytes/4-1
      )
      result
    // } else {
    //   val result   = Wire(Vec(PredictWidth, UInt(32.W)) )
    //   val dataVec  = cacheline.asTypeOf(Vec(blockBytes * 2/ 4, UInt(32.W)))
    //   (0 until PredictWidth).foreach( i =>
    //     result(i) := dataVec(cutPtr(i))
    //   )
    //   result
    // }
  }

  val f2_cache_response_data = fromICache.map(_.bits.data)
  val f2_data_2_cacheline    = Cat(f2_cache_response_data(1), f2_cache_response_data(0))

  val f2_cut_data = cut(f2_data_2_cacheline, f2_cut_ptr)

  /** predecode (include RVC expander) */
  // preDecoderRegIn.data := f2_reg_cut_data
  // preDecoderRegInIn.frontendTrigger := io.frontendTrigger
  // preDecoderRegInIn.csrTriggerEnable := io.csrTriggerEnable
  // preDecoderRegIn.pc  := f2_pc

  val preDecoderIn  = preDecoder.io.in
  preDecoderIn.data := f2_cut_data
  preDecoderIn.frontendTrigger := io.frontendTrigger
  preDecoderIn.csrTriggerEnable := io.csrTriggerEnable
  preDecoderIn.pc  := f2_pc
  val preDecoderOut = preDecoder.io.out


  //val f2_expd_instr     = preDecoderOut.expInstr
  val f2_instr        = preDecoderOut.instr
  val f2_pd           = preDecoderOut.pd
  val f2_jump_offset  = preDecoderOut.jumpOffset
  val f2_hasHalfValid = preDecoderOut.hasHalfValid
  val f2_crossPageFault      = VecInit((0 until PredictWidth).map(i => isLastInLine(f2_pc(i)) && !f2_except_pf(0)  && f2_doubleLine && f2_except_pf(1)  && !f2_pd(i).isRVC))
  val f2_crossGuestPageFault = VecInit((0 until PredictWidth).map(i => isLastInLine(f2_pc(i)) && !f2_except_gpf(0) && f2_doubleLine && f2_except_gpf(1) && !f2_pd(i).isRVC))
  XSPerfAccumulate("fetch_bubble_icache_not_resp", f2_valid && !icacheRespAllValid)


  /**
    ******************************************************************************
    * IFU Stage 3
    * - handle MMIO instruction
    *   - send request to Uncache fetch Unit
    *   - every packet includes 1 MMIO instruction
    *   - MMIO instructions will stop the fetch pipeline until committing from the RoB
    *   - flush to snpc (send ifu_redirect to Ftq)
    * - Ibuffer enqueue
    * - check prediction result in Frontend (jalFault/retFault/notCFIFault/invalidTakenFault/targetFault)
    * - handle last half RVI instruction
    ******************************************************************************
    */

  val f3_valid      = RegInit(false.B)
  val f3_ftq_req    = RegEnable(f2_ftq_req,    f2_fire)
  // val f3_situation  = RegEnable(f2_situation,  f2_fire)
  val f3_doubleLine = RegEnable(f2_doubleLine, f2_fire)
  val f3_fire       = io.toIbuffer.fire

  f3_ready := f3_fire || !f3_valid

  val f3_cut_data = RegEnable(f2_cut_data, f2_fire)

  val f3_except_pf  = RegEnable(f2_except_pf,  f2_fire)
  val f3_except_af  = RegEnable(f2_except_af,  f2_fire)
  val f3_except_gpf = RegEnable(f2_except_gpf, f2_fire)
  val f3_mmio       = RegEnable(f2_mmio,       f2_fire)

  //val f3_expd_instr     = RegEnable(f2_expd_instr,  f2_fire)
  val f3_instr      = RegEnable(f2_instr, f2_fire)
  val f3_expd_instr = VecInit((0 until PredictWidth).map{ i =>
    val expander = Module(new RVCExpander)
    expander.io.in := f3_instr(i)
    expander.io.out.bits
  })

  val f3_pd_wire     = RegEnable(f2_pd,          f2_fire)
  val f3_pd          = WireInit(f3_pd_wire)
  val f3_jump_offset = RegEnable(f2_jump_offset, f2_fire)
  val f3_af_vec      = RegEnable(f2_af_vec,      f2_fire)
  val f3_pf_vec      = RegEnable(f2_pf_vec,      f2_fire)
  val f3_gpf_vec     = RegEnable(next = f2_gpf_vec,     enable = f2_fire)
  val f3_gpaddrs     = RegEnable(next = f2_gpaddrs_vec, enable = f2_fire)
  val f3_pc          = RegEnable(f2_pc,          f2_fire)
  val f3_half_snpc   = RegEnable(f2_half_snpc,   f2_fire)
  val f3_instr_range = RegEnable(f2_instr_range, f2_fire)
  val f3_foldpc      = RegEnable(f2_foldpc,      f2_fire)
  val f3_crossPageFault      = RegEnable(f2_crossPageFault,      f2_fire)
  val f3_crossGuestPageFault = RegEnable(f2_crossGuestPageFault, f2_fire)
  val f3_hasHalfValid = RegEnable(f2_hasHalfValid, f2_fire)
  val f3_except       = VecInit((0 until 2).map{i => f3_except_pf(i) || f3_except_af(i) || f3_except_gpf(i)})
  val f3_has_except   = f3_valid && (f3_except_af.reduce(_||_) || f3_except_pf.reduce(_||_) || f3_except_gpf.reduce(_||_))
  val f3_pAddrs       = RegEnable(f2_paddrs,       f2_fire)
  val f3_resend_vaddr = RegEnable(f2_resend_vaddr, f2_fire)

  // Expand 1 bit to prevent overflow when asserting
  val f3_ftq_req_startAddr     = Cat(0.U(1.W), f3_ftq_req.startAddr)
  val f3_ftq_req_nextStartAddr = Cat(0.U(1.W), f3_ftq_req.nextStartAddr)
  // brType, isCall and isRet generation is delayed to the f3 stage
  val f3Predecoder = Module(new F3Predecoder)

  f3Predecoder.io.in.instr := f3_instr

  f3_pd.zipWithIndex.map{ case (pd,i) =>
    pd.brType := f3Predecoder.io.out.pd(i).brType
    pd.isCall := f3Predecoder.io.out.pd(i).isCall
    pd.isRet  := f3Predecoder.io.out.pd(i).isRet
  }

  val f3PdDiff = f3_pd_wire.zip(f3_pd).map{ case (a,b) => a.asUInt =/= b.asUInt }.reduce(_||_)
  XSError(f3_valid && f3PdDiff, "f3 pd diff")

  when(f3_valid && !f3_ftq_req.ftqOffset.valid){
    assert(f3_ftq_req_startAddr + (2*PredictWidth).U >= f3_ftq_req_nextStartAddr, s"More than ${2*PredictWidth} Bytes fetch is not allowed!")
  }

  /*** MMIO State Machine ***/
  val f3_mmio_data     = Reg(Vec(2, UInt(16.W)))
  val mmio_is_RVC      = RegInit(false.B)
  val mmio_resend_addr = RegInit(0.U(PAddrBits.W))
  val mmio_resend_af   = RegInit(false.B)
  val mmio_resend_pf   = RegInit(false.B)
  val mmio_resend_gpf  = RegInit(false.B)

  // last instruction finished
  val is_first_instr = RegInit(true.B)
  io.mmioCommitRead.mmioFtqPtr := RegNext(f3_ftq_req.ftqIdx + 1.U)

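  // MMIO fetch is fully sequential: wait until this instruction is the oldest in
  // flight (m_waitLastCmt), fetch it over the uncache bus (m_sendReq/m_waitResp),
  // and when an RVI straddles a 64-bit bus-word boundary, re-translate, re-check
  // PMP and refetch the upper half (m_sendTLB/m_tlbResp/m_sendPMP/m_resendReq/
  // m_waitResendResp); finally hold the pipeline until the instruction commits
  // from the RoB (m_waitCommit/m_commited).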
  val m_idle :: m_waitLastCmt :: m_sendReq :: m_waitResp :: m_sendTLB :: m_tlbResp :: m_sendPMP :: m_resendReq :: m_waitResendResp :: m_waitCommit :: m_commited :: Nil = Enum(11)
  val mmio_state = RegInit(m_idle)

  val f3_req_is_mmio = f3_mmio && f3_valid
  val mmio_commit = VecInit(io.rob_commits.map{commit => commit.valid && commit.bits.ftqIdx === f3_ftq_req.ftqIdx && commit.bits.ftqOffset === 0.U}).asUInt.orR
  val f3_mmio_req_commit = f3_req_is_mmio && mmio_state === m_commited

  val f3_mmio_to_commit      = f3_req_is_mmio && mmio_state === m_waitCommit
  val f3_mmio_to_commit_next = RegNext(f3_mmio_to_commit)
  val f3_mmio_can_go         = f3_mmio_to_commit && !f3_mmio_to_commit_next

  val fromFtqRedirectReg = RegNext(fromFtq.redirect, init = 0.U.asTypeOf(fromFtq.redirect))
  val mmioF3Flush        = RegNext(f3_flush, init = false.B)
  val f3_ftq_flush_self     = fromFtqRedirectReg.valid && RedirectLevel.flushItself(fromFtqRedirectReg.bits.level)
  val f3_ftq_flush_by_older = fromFtqRedirectReg.valid && isBefore(fromFtqRedirectReg.bits.ftqIdx, f3_ftq_req.ftqIdx)

  val f3_need_not_flush = f3_req_is_mmio && fromFtqRedirectReg.valid && !f3_ftq_flush_self && !f3_ftq_flush_by_older

  when(is_first_instr && mmio_commit){
    is_first_instr := false.B
  }

  when(f3_flush && !f3_req_is_mmio)                              {f3_valid := false.B}
  .elsewhen(mmioF3Flush && f3_req_is_mmio && !f3_need_not_flush) {f3_valid := false.B}
  .elsewhen(f2_fire && !f2_flush)                                {f3_valid := true.B }
  .elsewhen(io.toIbuffer.fire && !f3_req_is_mmio)                {f3_valid := false.B}
  .elsewhen(f3_req_is_mmio && f3_mmio_req_commit)                {f3_valid := false.B}

  val f3_mmio_use_seq_pc = RegInit(false.B)

  val (redirect_ftqIdx, redirect_ftqOffset) = (fromFtqRedirectReg.bits.ftqIdx, fromFtqRedirectReg.bits.ftqOffset)
  val redirect_mmio_req = fromFtqRedirectReg.valid && redirect_ftqIdx === f3_ftq_req.ftqIdx && redirect_ftqOffset === 0.U

  when(RegNext(f2_fire && !f2_flush) && f3_req_is_mmio) { f3_mmio_use_seq_pc := true.B  }
  .elsewhen(redirect_mmio_req)                          { f3_mmio_use_seq_pc := false.B }

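  // For an MMIO fetch, f3 is only released after the packet is committed
  // (f3_mmio_req_commit); ordinary packets just need the Ibuffer to accept them.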
  f3_ready := Mux(f3_req_is_mmio, io.toIbuffer.ready && f3_mmio_req_commit || !f3_valid, io.toIbuffer.ready || !f3_valid)

  // mmio state machine
  switch(mmio_state){
    is(m_idle){
      when(f3_req_is_mmio){
        mmio_state := m_waitLastCmt
      }
    }

    is(m_waitLastCmt){
      when(is_first_instr){
        mmio_state := m_sendReq
      }.otherwise{
        mmio_state := Mux(io.mmioCommitRead.mmioLastCommit, m_sendReq, m_waitLastCmt)
      }
    }

    is(m_sendReq){
      mmio_state := Mux(toUncache.fire, m_waitResp, m_sendReq)
    }

    is(m_waitResp){
      when(fromUncache.fire){
        val isRVC = fromUncache.bits.data(1,0) =/= 3.U
        val needResend = !isRVC && f3_pAddrs(0)(2,1) === 3.U
        mmio_state := Mux(needResend, m_sendTLB, m_waitCommit)

        mmio_is_RVC := isRVC
        f3_mmio_data(0) := fromUncache.bits.data(15,0)
        f3_mmio_data(1) := fromUncache.bits.data(31,16)
      }
    }

    is(m_sendTLB){
      when(io.iTLBInter.req.valid && !io.iTLBInter.resp.bits.miss){
        mmio_state := m_tlbResp
      }
    }

    is(m_tlbResp){
      val tlbExept = io.iTLBInter.resp.bits.excp(0).pf.instr ||
                     io.iTLBInter.resp.bits.excp(0).af.instr ||
                     io.iTLBInter.resp.bits.excp(0).gpf.instr
      mmio_state := Mux(tlbExept, m_waitCommit, m_sendPMP)
      mmio_resend_addr := io.iTLBInter.resp.bits.paddr(0)
      mmio_resend_af  := mmio_resend_af  || io.iTLBInter.resp.bits.excp(0).af.instr
      mmio_resend_pf  := mmio_resend_pf  || io.iTLBInter.resp.bits.excp(0).pf.instr
      mmio_resend_gpf := mmio_resend_gpf || io.iTLBInter.resp.bits.excp(0).gpf.instr
    }

    is(m_sendPMP){
      val pmpExcpAF = io.pmp.resp.instr || !io.pmp.resp.mmio
      mmio_state := Mux(pmpExcpAF, m_waitCommit, m_resendReq)
      mmio_resend_af := pmpExcpAF
    }

    is(m_resendReq){
      mmio_state := Mux(toUncache.fire, m_waitResendResp, m_resendReq)
    }

    is(m_waitResendResp){
      when(fromUncache.fire){
        mmio_state := m_waitCommit
        f3_mmio_data(1) := fromUncache.bits.data(15,0)
      }
    }

    is(m_waitCommit){
      when(mmio_commit){
        mmio_state := m_commited
      }
    }

    //normal mmio instruction
    is(m_commited){
      mmio_state := m_idle
      mmio_is_RVC := false.B
      mmio_resend_addr := 0.U
    }
  }

  // Exception or flush by older branch prediction
  // Condition is from RegNext(fromFtq.redirect), 1 cycle after backend redirect
  when(f3_ftq_flush_self || f3_ftq_flush_by_older)  {
    mmio_state := m_idle
    mmio_is_RVC := false.B
    mmio_resend_addr := 0.U
    mmio_resend_af := false.B
    f3_mmio_data.map(_ := 0.U)
  }

  toUncache.valid     := ((mmio_state === m_sendReq) || (mmio_state === m_resendReq)) && f3_req_is_mmio
  toUncache.bits.addr := Mux((mmio_state === m_resendReq), mmio_resend_addr, f3_pAddrs(0))
  fromUncache.ready   := true.B

  io.iTLBInter.req.valid         := (mmio_state === m_sendTLB) && f3_req_is_mmio
  io.iTLBInter.req.bits.size     := 3.U
  io.iTLBInter.req.bits.vaddr    := f3_resend_vaddr
  io.iTLBInter.req.bits.debug.pc := f3_resend_vaddr
  io.iTLBInter.req.bits.hyperinst:= DontCare
  io.iTLBInter.req.bits.hlvx     := DontCare

  io.iTLBInter.req.bits.kill                := false.B // IFU uses the itlb only for mmio here; no sync is needed, so keep kill low
  io.iTLBInter.req.bits.cmd                 := TlbCmd.exec
  io.iTLBInter.req.bits.memidx              := DontCare
  io.iTLBInter.req.bits.debug.robIdx        := DontCare
  io.iTLBInter.req.bits.no_translate        := false.B
  io.iTLBInter.req.bits.debug.isFirstIssue  := DontCare

  io.pmp.req.valid     := (mmio_state === m_sendPMP) && f3_req_is_mmio
  io.pmp.req.bits.addr := mmio_resend_addr
  io.pmp.req.bits.size := 3.U
  io.pmp.req.bits.cmd  := TlbCmd.exec

  val f3_lastHalf = RegInit(0.U.asTypeOf(new LastHalfInfo))

  val f3_predecode_range = VecInit(preDecoderOut.pd.map(inst => inst.valid)).asUInt
  val f3_mmio_range      = VecInit((0 until PredictWidth).map(i => if(i == 0) true.B else false.B))
  val f3_instr_valid     = Wire(Vec(PredictWidth, Bool()))

  /*** prediction result check ***/
  checkerIn.ftqOffset  := f3_ftq_req.ftqOffset
  checkerIn.jumpOffset := f3_jump_offset
  checkerIn.target     := f3_ftq_req.nextStartAddr
  checkerIn.instrRange := f3_instr_range.asTypeOf(Vec(PredictWidth, Bool()))
  checkerIn.instrValid := f3_instr_valid.asTypeOf(Vec(PredictWidth, Bool()))
  checkerIn.pds        := f3_pd
  checkerIn.pc         := f3_pc

  /*** handle half RVI in the last 2 Bytes ***/

  def hasLastHalf(idx: UInt) = {
    //!f3_pd(idx).isRVC && checkerOutStage1.fixedRange(idx) && f3_instr_valid(idx) && !checkerOutStage1.fixedTaken(idx) && !checkerOutStage2.fixedMissPred(idx) && ! f3_req_is_mmio
    !f3_pd(idx).isRVC && checkerOutStage1.fixedRange(idx) && f3_instr_valid(idx) && !checkerOutStage1.fixedTaken(idx) && !f3_req_is_mmio
  }

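  // A "false last half" arises when the last valid slot of the fixed range (not
  // necessarily slot PredictWidth-1) holds an unfinished RVI: the write-back stage
  // then flushes and redirects to that instruction's sequential PC (f3_false_snpc)
  // instead of recording a last-half state for the next fetch block.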
  val f3_last_validIdx = ParallelPosteriorityEncoder(checkerOutStage1.fixedRange)

  val f3_hasLastHalf    = hasLastHalf((PredictWidth - 1).U)
  val f3_false_lastHalf = hasLastHalf(f3_last_validIdx)
  val f3_false_snpc     = f3_half_snpc(f3_last_validIdx)

  val f3_lastHalf_mask    = VecInit((0 until PredictWidth).map( i => if(i == 0) false.B else true.B )).asUInt
  val f3_lastHalf_disable = RegInit(false.B)

  when(f3_flush || (f3_fire && f3_lastHalf_disable)){
    f3_lastHalf_disable := false.B
  }

  when (f3_flush) {
    f3_lastHalf.valid := false.B
  }.elsewhen (f3_fire) {
    f3_lastHalf.valid := f3_hasLastHalf && !f3_lastHalf_disable
    f3_lastHalf.middlePC := f3_ftq_req.nextStartAddr
  }

  f3_instr_valid := Mux(f3_lastHalf.valid, f3_hasHalfValid, VecInit(f3_pd.map(inst => inst.valid)))

  /*** frontend Trigger ***/
  frontendTrigger.io.pds  := f3_pd
  frontendTrigger.io.pc   := f3_pc
  frontendTrigger.io.data := f3_cut_data

  frontendTrigger.io.frontendTrigger  := io.frontendTrigger
  frontendTrigger.io.csrTriggerEnable := io.csrTriggerEnable

  val f3_triggered = frontendTrigger.io.triggered

  /*** send to Ibuffer ***/

  io.toIbuffer.valid          := f3_valid && (!f3_req_is_mmio || f3_mmio_can_go) && !f3_flush
  io.toIbuffer.bits.instrs    := f3_expd_instr
  io.toIbuffer.bits.valid     := f3_instr_valid.asUInt
  io.toIbuffer.bits.enqEnable := checkerOutStage1.fixedRange.asUInt & f3_instr_valid.asUInt
  io.toIbuffer.bits.pd        := f3_pd
  io.toIbuffer.bits.ftqPtr    := f3_ftq_req.ftqIdx
  io.toIbuffer.bits.pc        := f3_pc
  io.toIbuffer.bits.gpaddr    := f3_gpaddrs
  io.toIbuffer.bits.ftqOffset.zipWithIndex.map{case(a, i) => a.bits := i.U; a.valid := checkerOutStage1.fixedTaken(i) && !f3_req_is_mmio}
  io.toIbuffer.bits.foldpc    := f3_foldpc
  io.toIbuffer.bits.ipf       := VecInit(f3_pf_vec.zip(f3_crossPageFault).map{case (pf, crossPF) => pf || crossPF})
  io.toIbuffer.bits.igpf      := VecInit(f3_gpf_vec.zip(f3_crossGuestPageFault).map{case (gpf, crossGPF) => gpf || crossGPF})
  io.toIbuffer.bits.acf       := f3_af_vec
  io.toIbuffer.bits.crossPageIPFFix := (0 until PredictWidth).map(i => f3_crossPageFault(i) || f3_crossGuestPageFault(i))
  io.toIbuffer.bits.triggered := f3_triggered

  when(f3_lastHalf.valid){
    io.toIbuffer.bits.enqEnable := checkerOutStage1.fixedRange.asUInt & f3_instr_valid.asUInt & f3_lastHalf_mask
    io.toIbuffer.bits.valid     := f3_lastHalf_mask & f3_instr_valid.asUInt
  }



  // Write back to Ftq
  val f3_cache_fetch = f3_valid && !(f2_fire && !f2_flush)
  val finishFetchMaskReg = RegNext(f3_cache_fetch)

  val mmioFlushWb = Wire(Valid(new PredecodeWritebackBundle))
  val f3_mmio_missOffset = Wire(ValidUndirectioned(UInt(log2Ceil(PredictWidth).W)))
  f3_mmio_missOffset.valid := f3_req_is_mmio
  f3_mmio_missOffset.bits  := 0.U

  // Send mmioFlushWb back to FTQ 1 cycle after the uncache fetch returns.
  // When the backend redirects, mmio_state is reset after 1 cycle;
  // in this case, mask .valid to avoid overriding the backend redirect.
  mmioFlushWb.valid           := (f3_req_is_mmio && mmio_state === m_waitCommit && RegNext(fromUncache.fire) &&
    f3_mmio_use_seq_pc && !f3_ftq_flush_self && !f3_ftq_flush_by_older)
  mmioFlushWb.bits.pc         := f3_pc
  mmioFlushWb.bits.pd         := f3_pd
  mmioFlushWb.bits.pd.zipWithIndex.map{case(instr,i) => instr.valid := f3_mmio_range(i)}
  mmioFlushWb.bits.ftqIdx     := f3_ftq_req.ftqIdx
  mmioFlushWb.bits.ftqOffset  := f3_ftq_req.ftqOffset.bits
  mmioFlushWb.bits.misOffset  := f3_mmio_missOffset
  mmioFlushWb.bits.cfiOffset  := DontCare
  mmioFlushWb.bits.target     := Mux(mmio_is_RVC, f3_ftq_req.startAddr + 2.U, f3_ftq_req.startAddr + 4.U)
  mmioFlushWb.bits.jalTarget  := DontCare
  mmioFlushWb.bits.instrRange := f3_mmio_range

  /** external predecode for MMIO instruction */
  when(f3_req_is_mmio){
    val inst = Cat(f3_mmio_data(1), f3_mmio_data(0))
    val currentIsRVC = isRVC(inst)

    val brType::isCall::isRet::Nil = brInfo(inst)
    val jalOffset = jal_offset(inst, currentIsRVC)
    val brOffset  = br_offset(inst, currentIsRVC)

    io.toIbuffer.bits.instrs(0) := new RVCDecoder(inst, XLEN, useAddiForMv = true).decode.bits


    io.toIbuffer.bits.pd(0).valid  := true.B
    io.toIbuffer.bits.pd(0).isRVC  := currentIsRVC
    io.toIbuffer.bits.pd(0).brType := brType
    io.toIbuffer.bits.pd(0).isCall := isCall
    io.toIbuffer.bits.pd(0).isRet  := isRet

    io.toIbuffer.bits.acf(0) := mmio_resend_af
    io.toIbuffer.bits.ipf(0) := mmio_resend_pf
    io.toIbuffer.bits.crossPageIPFFix(0) := mmio_resend_pf

    io.toIbuffer.bits.enqEnable := f3_mmio_range.asUInt

    mmioFlushWb.bits.pd(0).valid  := true.B
    mmioFlushWb.bits.pd(0).isRVC  := currentIsRVC
    mmioFlushWb.bits.pd(0).brType := brType
    mmioFlushWb.bits.pd(0).isCall := isCall
    mmioFlushWb.bits.pd(0).isRet  := isRet
  }

  mmio_redirect := (f3_req_is_mmio && mmio_state === m_waitCommit && RegNext(fromUncache.fire) && f3_mmio_use_seq_pc)

  XSPerfAccumulate("fetch_bubble_ibuffer_not_ready", io.toIbuffer.valid && !io.toIbuffer.ready)


  /**
    ******************************************************************************
    * IFU Write Back Stage
    * - write back predecode information to Ftq to update it
    * - redirect if a fault prediction is found
    * - redirect on a false-hit last half (the last PC is not start + 32 Bytes,
    *   but falls in the middle of a notCFI RVI instruction)
    ******************************************************************************
    */

  val wb_valid   = RegNext(RegNext(f2_fire && !f2_flush) && !f3_req_is_mmio && !f3_flush)
  val wb_ftq_req = RegNext(f3_ftq_req)

  val wb_check_result_stage1 = RegNext(checkerOutStage1)
  val wb_check_result_stage2 = checkerOutStage2
  val wb_instr_range = RegNext(io.toIbuffer.bits.enqEnable)
  val wb_pc          = RegNext(f3_pc)
  val wb_pd          = RegNext(f3_pd)
  val wb_instr_valid = RegNext(f3_instr_valid)

  /* false hit lastHalf */
  val wb_lastIdx        = RegNext(f3_last_validIdx)
  val wb_false_lastHalf = RegNext(f3_false_lastHalf) && wb_lastIdx =/= (PredictWidth - 1).U
  val wb_false_target   = RegNext(f3_false_snpc)

  val wb_half_flush  = wb_false_lastHalf
  val wb_half_target = wb_false_target

  /* false oversize */
  val lastIsRVC = wb_instr_range.asTypeOf(Vec(PredictWidth, Bool())).last && wb_pd.last.isRVC
  val lastIsRVI = wb_instr_range.asTypeOf(Vec(PredictWidth, Bool()))(PredictWidth - 2) && !wb_pd(PredictWidth - 2).isRVC
  val lastTaken = wb_check_result_stage1.fixedTaken.last

  f3_wb_not_flush := wb_ftq_req.ftqIdx === f3_ftq_req.ftqIdx && f3_valid && wb_valid

  /** if a req with a last half but mispredicted enters the wb stage, and f3 stalls
    * this cycle, we set a flag to notify f3 that the last half flag need not be set.
    */
  //f3_fire is after wb_valid
  when(wb_valid && RegNext(f3_hasLastHalf, init = false.B)
        && wb_check_result_stage2.fixedMissPred(PredictWidth - 1) && !f3_fire && !RegNext(f3_fire, init = false.B) && !f3_flush
      ){
    f3_lastHalf_disable := true.B
  }

  //wb_valid and f3_fire are in the same cycle
  when(wb_valid && RegNext(f3_hasLastHalf, init = false.B)
        && wb_check_result_stage2.fixedMissPred(PredictWidth - 1) && f3_fire
      ){
    f3_lastHalf.valid := false.B
  }

  val checkFlushWb = Wire(Valid(new PredecodeWritebackBundle))
  val checkFlushWbjalTargetIdx = ParallelPriorityEncoder(VecInit(wb_pd.zip(wb_instr_valid).map{case (pd, v) => v && pd.isJal }))
  val checkFlushWbTargetIdx = ParallelPriorityEncoder(wb_check_result_stage2.fixedMissPred)
  checkFlushWb.valid                := wb_valid
  checkFlushWb.bits.pc              := wb_pc
  checkFlushWb.bits.pd              := wb_pd
  checkFlushWb.bits.pd.zipWithIndex.map{case(instr,i) => instr.valid := wb_instr_valid(i)}
  checkFlushWb.bits.ftqIdx          := wb_ftq_req.ftqIdx
  checkFlushWb.bits.ftqOffset       := wb_ftq_req.ftqOffset.bits
  checkFlushWb.bits.misOffset.valid := ParallelOR(wb_check_result_stage2.fixedMissPred) || wb_half_flush
  checkFlushWb.bits.misOffset.bits  := Mux(wb_half_flush, wb_lastIdx, ParallelPriorityEncoder(wb_check_result_stage2.fixedMissPred))
  checkFlushWb.bits.cfiOffset.valid := ParallelOR(wb_check_result_stage1.fixedTaken)
  checkFlushWb.bits.cfiOffset.bits  := ParallelPriorityEncoder(wb_check_result_stage1.fixedTaken)
  checkFlushWb.bits.target          := Mux(wb_half_flush, wb_half_target, wb_check_result_stage2.fixedTarget(checkFlushWbTargetIdx))
  checkFlushWb.bits.jalTarget       := wb_check_result_stage2.jalTarget(checkFlushWbjalTargetIdx)
  checkFlushWb.bits.instrRange      := wb_instr_range.asTypeOf(Vec(PredictWidth, Bool()))

  toFtq.pdWb := Mux(wb_valid, checkFlushWb, mmioFlushWb)

  wb_redirect := checkFlushWb.bits.misOffset.valid && wb_valid

  /* write back flush type */
  val checkFaultType    = wb_check_result_stage2.faultType
  val checkJalFault     = wb_valid && checkFaultType.map(_.isjalFault).reduce(_||_)
  val checkRetFault     = wb_valid && checkFaultType.map(_.isRetFault).reduce(_||_)
  val checkTargetFault  = wb_valid && checkFaultType.map(_.istargetFault).reduce(_||_)
  val checkNotCFIFault  = wb_valid && checkFaultType.map(_.notCFIFault).reduce(_||_)
  val checkInvalidTaken = wb_valid && checkFaultType.map(_.invalidTakenFault).reduce(_||_)


  XSPerfAccumulate("predecode_flush_jalFault",          checkJalFault)
  XSPerfAccumulate("predecode_flush_retFault",          checkRetFault)
  XSPerfAccumulate("predecode_flush_targetFault",       checkTargetFault)
  XSPerfAccumulate("predecode_flush_notCFIFault",       checkNotCFIFault)
  XSPerfAccumulate("predecode_flush_invalidTakenFault", checkInvalidTaken)

  when(checkRetFault){
    XSDebug("startAddr:%x  nextstartAddr:%x  taken:%d    takenIdx:%d\n",
        wb_ftq_req.startAddr, wb_ftq_req.nextStartAddr, wb_ftq_req.ftqOffset.valid, wb_ftq_req.ftqOffset.bits)
  }


  /** performance counter */
  val f3_perf_info = RegEnable(f2_perf_info, f2_fire)
  val f3_req_0 = io.toIbuffer.fire
  val f3_req_1 = io.toIbuffer.fire && f3_doubleLine
  val f3_hit_0 = io.toIbuffer.fire && f3_perf_info.bank_hit(0)
  val f3_hit_1 = io.toIbuffer.fire && f3_doubleLine && f3_perf_info.bank_hit(1)
  val f3_hit   = f3_perf_info.hit
  val perfEvents = Seq(
    ("frontendFlush                ", wb_redirect                                    ),
    ("ifu_req                      ", io.toIbuffer.fire                              ),
    ("ifu_miss                     ", io.toIbuffer.fire && !f3_perf_info.hit         ),
    ("ifu_req_cacheline_0          ", f3_req_0                                       ),
    ("ifu_req_cacheline_1          ", f3_req_1                                       ),
    ("ifu_req_cacheline_0_hit      ", f3_hit_0                                       ),
    ("ifu_req_cacheline_1_hit      ", f3_hit_1                                       ),
    ("only_0_hit                   ", f3_perf_info.only_0_hit    && io.toIbuffer.fire),
    ("only_0_miss                  ", f3_perf_info.only_0_miss   && io.toIbuffer.fire),
    ("hit_0_hit_1                  ", f3_perf_info.hit_0_hit_1   && io.toIbuffer.fire),
    ("hit_0_miss_1                 ", f3_perf_info.hit_0_miss_1  && io.toIbuffer.fire),
    ("miss_0_hit_1                 ", f3_perf_info.miss_0_hit_1  && io.toIbuffer.fire),
    ("miss_0_miss_1                ", f3_perf_info.miss_0_miss_1 && io.toIbuffer.fire),
  )
  generatePerfEvent()

  XSPerfAccumulate("ifu_req",  io.toIbuffer.fire)
  XSPerfAccumulate("ifu_miss", io.toIbuffer.fire && !f3_hit)
  XSPerfAccumulate("ifu_req_cacheline_0", f3_req_0)
  XSPerfAccumulate("ifu_req_cacheline_1", f3_req_1)
  XSPerfAccumulate("ifu_req_cacheline_0_hit", f3_hit_0)
  XSPerfAccumulate("ifu_req_cacheline_1_hit", f3_hit_1)
  XSPerfAccumulate("frontendFlush", wb_redirect)
  XSPerfAccumulate("only_0_hit",      f3_perf_info.only_0_hit      && io.toIbuffer.fire)
  XSPerfAccumulate("only_0_miss",     f3_perf_info.only_0_miss     && io.toIbuffer.fire)
  XSPerfAccumulate("hit_0_hit_1",     f3_perf_info.hit_0_hit_1     && io.toIbuffer.fire)
  XSPerfAccumulate("hit_0_miss_1",    f3_perf_info.hit_0_miss_1    && io.toIbuffer.fire)
  XSPerfAccumulate("miss_0_hit_1",    f3_perf_info.miss_0_hit_1    && io.toIbuffer.fire)
  XSPerfAccumulate("miss_0_miss_1",   f3_perf_info.miss_0_miss_1   && io.toIbuffer.fire)
  XSPerfAccumulate("hit_0_except_1",  f3_perf_info.hit_0_except_1  && io.toIbuffer.fire)
  XSPerfAccumulate("miss_0_except_1", f3_perf_info.miss_0_except_1 && io.toIbuffer.fire)
  XSPerfAccumulate("except_0",        f3_perf_info.except_0        && io.toIbuffer.fire)
  XSPerfHistogram("ifu2ibuffer_validCnt", PopCount(io.toIbuffer.bits.valid & io.toIbuffer.bits.enqEnable), io.toIbuffer.fire, 0, PredictWidth + 1, 1)

  val isWriteFetchToIBufferTable = WireInit(Constantin.createRecord("isWriteFetchToIBufferTable" + p(XSCoreParamsKey).HartId.toString))
  val isWriteIfuWbToFtqTable     = WireInit(Constantin.createRecord("isWriteIfuWbToFtqTable" + p(XSCoreParamsKey).HartId.toString))
  val fetchToIBufferTable = ChiselDB.createTable("FetchToIBuffer" + p(XSCoreParamsKey).HartId.toString, new FetchToIBufferDB)
  val ifuWbToFtqTable     = ChiselDB.createTable("IfuWbToFtq" + p(XSCoreParamsKey).HartId.toString, new IfuWbToFtqDB)

  val fetchIBufferDumpData = Wire(new FetchToIBufferDB)
  fetchIBufferDumpData.start_addr   := f3_ftq_req.startAddr
  fetchIBufferDumpData.instr_count  := PopCount(io.toIbuffer.bits.enqEnable)
  fetchIBufferDumpData.exception    := (f3_perf_info.except_0 && io.toIbuffer.fire) || (f3_perf_info.hit_0_except_1 && io.toIbuffer.fire) || (f3_perf_info.miss_0_except_1 && io.toIbuffer.fire)
  fetchIBufferDumpData.is_cache_hit := f3_hit

  val ifuWbToFtqDumpData = Wire(new IfuWbToFtqDB)
  ifuWbToFtqDumpData.start_addr        := wb_ftq_req.startAddr
  ifuWbToFtqDumpData.is_miss_pred      := checkFlushWb.bits.misOffset.valid
  ifuWbToFtqDumpData.miss_pred_offset  := checkFlushWb.bits.misOffset.bits
  ifuWbToFtqDumpData.checkJalFault     := checkJalFault
  ifuWbToFtqDumpData.checkRetFault     := checkRetFault
  ifuWbToFtqDumpData.checkTargetFault  := checkTargetFault
  ifuWbToFtqDumpData.checkNotCFIFault  := checkNotCFIFault
  ifuWbToFtqDumpData.checkInvalidTaken := checkInvalidTaken

  fetchToIBufferTable.log(
    data = fetchIBufferDumpData,
    en = isWriteFetchToIBufferTable.orR && io.toIbuffer.fire,
    site = "IFU" + p(XSCoreParamsKey).HartId.toString,
    clock = clock,
    reset = reset
  )
  ifuWbToFtqTable.log(
    data = ifuWbToFtqDumpData,
    en = isWriteIfuWbToFtqTable.orR && checkFlushWb.valid,
    site = "IFU" + p(XSCoreParamsKey).HartId.toString,
    clock = clock,
    reset = reset
  )

}