xref: /XiangShan/src/main/scala/xiangshan/frontend/IFU.scala (revision 3a6db8a39a25f02047d1fb2b257c89be0b2c36dc)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.RVCDecoder
import xiangshan._
import xiangshan.cache.mmu._
import xiangshan.frontend.icache._
import utils._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}

trait HasInstrMMIOConst extends HasXSParameter with HasIFUConst {
  def mmioBusWidth = 64
  def mmioBusBytes = mmioBusWidth / 8
  def maxInstrLen = 32
}

trait HasIFUConst extends HasXSParameter {
  def addrAlign(addr: UInt, bytes: Int, highest: Int): UInt = Cat(addr(highest-1, log2Ceil(bytes)), 0.U(log2Ceil(bytes).W))
  def fetchQueueSize = 2

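  // Maps a PC inside the fetch block to the index of the instruction slot that
  // *ends* at that PC. Subtracting instBytes first makes the fall-through address
  // (which points one slot past the last instruction) select the last valid slot.
  // A minimal worked example, assuming RVC is enabled (instBytes = 2,
  // instOffsetBits = 1) and PredictWidth = 16: start = 0x1000, pc = 0x1008
  // gives byteOffset = 8, and (8 - 2)(4, 1) = 3, i.e. slot 3 is the last one.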
  def getBasicBlockIdx(pc: UInt, start: UInt): UInt = {
    val byteOffset = pc - start
    (byteOffset - instBytes.U)(log2Ceil(PredictWidth), instOffsetBits)
  }
}

class IfuToFtqIO(implicit p: Parameters) extends XSBundle {
  val pdWb = Valid(new PredecodeWritebackBundle)
}

class FtqInterface(implicit p: Parameters) extends XSBundle {
  val fromFtq = Flipped(new FtqToIfuIO)
  val toFtq   = new IfuToFtqIO
}

class UncacheInterface(implicit p: Parameters) extends XSBundle {
  val fromUncache = Flipped(DecoupledIO(new InsUncacheResp))
  val toUncache   = DecoupledIO(new InsUncacheReq)
}

class NewIFUIO(implicit p: Parameters) extends XSBundle {
  val ftqInter         = new FtqInterface
  val icacheInter      = Vec(2, Flipped(new ICacheMainPipeBundle))
  val icacheStop       = Output(Bool())
  val icachePerfInfo   = Input(new ICachePerfInfo)
  val toIbuffer        = Decoupled(new FetchToIBuffer)
  val uncacheInter     = new UncacheInterface
  val frontendTrigger  = Flipped(new FrontendTdataDistributeIO)
  val csrTriggerEnable = Input(Vec(4, Bool()))
  val rob_commits      = Flipped(Vec(CommitWidth, Valid(new RobCommitInfo)))
}

// Records the case where fallThruAddr falls in the middle of an
// RVI instruction: the block keeps only the lower half of that instruction.
class LastHalfInfo(implicit p: Parameters) extends XSBundle {
  val valid = Bool()
  val middlePC = UInt(VAddrBits.W)
  def matchThisBlock(startAddr: UInt) = valid && middlePC === startAddr
}

class IfuToPreDecode(implicit p: Parameters) extends XSBundle {
  val data                = if(HasCExtension) Vec(PredictWidth + 1, UInt(16.W)) else Vec(PredictWidth, UInt(32.W))
  val frontendTrigger     = new FrontendTdataDistributeIO
  val csrTriggerEnable    = Vec(4, Bool())
  val pc                  = Vec(PredictWidth, UInt(VAddrBits.W))
}

class IfuToPredChecker(implicit p: Parameters) extends XSBundle {
  val ftqOffset     = Valid(UInt(log2Ceil(PredictWidth).W))
  val jumpOffset    = Vec(PredictWidth, UInt(XLEN.W))
  val target        = UInt(VAddrBits.W)
  val instrRange    = Vec(PredictWidth, Bool())
  val instrValid    = Vec(PredictWidth, Bool())
  val pds           = Vec(PredictWidth, new PreDecodeInfo)
  val pc            = Vec(PredictWidth, UInt(VAddrBits.W))
}

class NewIFU(implicit p: Parameters) extends XSModule
  with HasICacheParameters
  with HasIFUConst
  with HasPdConst
  with HasCircularQueuePtrHelper
  with HasPerfEvents
{
  println(s"icache ways: ${nWays} sets: ${nSets}")
  val io = IO(new NewIFUIO)
  val (toFtq, fromFtq)         = (io.ftqInter.toFtq, io.ftqInter.fromFtq)
  val (toICache, fromICache)   = (VecInit(io.icacheInter.map(_.req)), VecInit(io.icacheInter.map(_.resp)))
  val (toUncache, fromUncache) = (io.uncacheInter.toUncache, io.uncacheInter.fromUncache)

  def isCrossLineReq(start: UInt, end: UInt): Bool = start(blockOffBits) ^ end(blockOffBits)

  def isLastInCacheline(fallThruAddr: UInt): Bool = fallThruAddr(blockOffBits - 1, 1) === 0.U

  class TlbExept(implicit p: Parameters) extends XSBundle {
    val pageFault = Bool()
    val accessFault = Bool()
    val mmio = Bool()
  }

  val preDecoder      = Module(new PreDecode)
  val predChecker     = Module(new PredChecker)
  val frontendTrigger = Module(new FrontendTrigger)
  val (preDecoderIn, preDecoderOut) = (preDecoder.io.in, preDecoder.io.out)
  val (checkerIn, checkerOut)       = (predChecker.io.in, predChecker.io.out)

  /**
    ******************************************************************************
    * IFU Stage 0
    * - send cacheline fetch request to ICacheMainPipe
    ******************************************************************************
    */

  val f0_valid                             = fromFtq.req.valid
  val f0_ftq_req                           = fromFtq.req.bits
  val f0_situation                         = VecInit(Seq(isCrossLineReq(f0_ftq_req.startAddr, f0_ftq_req.fallThruAddr), isLastInCacheline(f0_ftq_req.fallThruAddr)))
  val f0_doubleLine                        = f0_situation(0) || f0_situation(1)
  val f0_vSetIdx                           = VecInit(get_idx(f0_ftq_req.startAddr), get_idx(f0_ftq_req.fallThruAddr))
  val f0_fire                              = fromFtq.req.fire()

  val f0_flush, f1_flush, f2_flush, f3_flush = WireInit(false.B)
  val from_bpu_f0_flush, from_bpu_f1_flush, from_bpu_f2_flush, from_bpu_f3_flush = WireInit(false.B)

  from_bpu_f0_flush := fromFtq.flushFromBpu.shouldFlushByStage2(f0_ftq_req.ftqIdx) ||
                       fromFtq.flushFromBpu.shouldFlushByStage3(f0_ftq_req.ftqIdx)

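  // Redirects come from the backend, from the IFU write-back stage, and from
  // MMIO handling. A flush at a later stage also flushes every earlier stage:
  // f2_flush folds into f1_flush, which folds into f0_flush, with BPU-initiated
  // flushes mixed in at f0/f1.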
  val wb_redirect, mmio_redirect, backend_redirect = WireInit(false.B)
  val f3_wb_not_flush = WireInit(false.B)

  backend_redirect := fromFtq.redirect.valid
  f3_flush := backend_redirect || (wb_redirect && !f3_wb_not_flush)
  f2_flush := backend_redirect || mmio_redirect || wb_redirect
  f1_flush := f2_flush || from_bpu_f1_flush
  f0_flush := f1_flush || from_bpu_f0_flush

  val f1_ready, f2_ready, f3_ready = WireInit(false.B)

  // Hold off fetch requests for the first 500 cycles after reset.
  fromFtq.req.ready := toICache(0).ready && toICache(1).ready && f2_ready && GTimer() > 500.U

  toICache(0).valid       := fromFtq.req.valid && !f0_flush
  toICache(0).bits.vaddr  := fromFtq.req.bits.startAddr
  toICache(1).valid       := fromFtq.req.valid && f0_doubleLine && !f0_flush
  toICache(1).bits.vaddr  := fromFtq.req.bits.fallThruAddr

  /** <PERF> f0 fetch bubble */

  XSPerfAccumulate("fetch_bubble_ftq_not_valid", !f0_valid)
  XSPerfAccumulate("fetch_bubble_pipe_stall",    f0_valid && toICache(0).ready && toICache(1).ready && !f1_ready)
  XSPerfAccumulate("fetch_bubble_sram_0_busy",   f0_valid && !toICache(0).ready)
  XSPerfAccumulate("fetch_bubble_sram_1_busy",   f0_valid && !toICache(1).ready)

  /**
    ******************************************************************************
    * IFU Stage 1
    * - calculate pc/half_snpc/cut_ptr for every instruction
    ******************************************************************************
    */

  val f1_valid      = RegInit(false.B)
  val f1_ftq_req    = RegEnable(next = f0_ftq_req,    enable = f0_fire)
  val f1_situation  = RegEnable(next = f0_situation,  enable = f0_fire)
  val f1_doubleLine = RegEnable(next = f0_doubleLine, enable = f0_fire)
  val f1_vSetIdx    = RegEnable(next = f0_vSetIdx,    enable = f0_fire)
  val f1_fire       = f1_valid && f1_ready

  f1_ready := f2_ready || !f1_valid

  from_bpu_f1_flush := fromFtq.flushFromBpu.shouldFlushByStage3(f1_ftq_req.ftqIdx)

  when(f1_flush)                  {f1_valid := false.B}
  .elsewhen(f0_fire && !f0_flush) {f1_valid := true.B}
  .elsewhen(f1_fire)              {f1_valid := false.B}

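  // Per-slot addresses for this fetch block. A sketch of the layout, assuming
  // RVC is enabled, PredictWidth = 16 and 64-byte cachelines (blockOffBits = 6):
  //  - f1_pc(i):        startAddr + 2*i, the PC of each 2-byte instruction slot
  //  - f1_half_snpc(i): startAddr + 2*i + 4, the sequential next PC if the
  //                     instruction in slot i is a 32-bit RVI
  //  - f1_cut_ptr(i):   half-word read index into the two concatenated
  //                     cachelines, i.e. startAddr(5, 1) + i, used by cut() in f2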
  val f1_pc        = VecInit((0 until PredictWidth).map(i => f1_ftq_req.startAddr + (i * 2).U))
  val f1_half_snpc = VecInit((0 until PredictWidth).map(i => f1_ftq_req.startAddr + ((i+2) * 2).U))
  val f1_cut_ptr   = if(HasCExtension) VecInit((0 until PredictWidth + 1).map(i => Cat(0.U(1.W), f1_ftq_req.startAddr(blockOffBits-1, 1)) + i.U))
                     else              VecInit((0 until PredictWidth).map(i =>     Cat(0.U(1.W), f1_ftq_req.startAddr(blockOffBits-1, 2)) + i.U))

  /**
    ******************************************************************************
    * IFU Stage 2
    * - icache response data (latched for pipeline stop)
    * - generate exception bits for every instruction (page fault/access fault/mmio)
    * - generate predicted instruction range (1 means this instruction is in this fetch packet)
    * - cut data from cachelines into fetch-packet instruction code
    * - instruction predecode and RVC expand
    ******************************************************************************
    */

  val icacheRespAllValid = WireInit(false.B)

  val f2_valid      = RegInit(false.B)
  val f2_ftq_req    = RegEnable(next = f1_ftq_req,    enable = f1_fire)
  val f2_situation  = RegEnable(next = f1_situation,  enable = f1_fire)
  val f2_doubleLine = RegEnable(next = f1_doubleLine, enable = f1_fire)
  val f2_vSetIdx    = RegEnable(next = f1_vSetIdx,    enable = f1_fire)
  val f2_fire       = f2_valid && f2_ready

  f2_ready := f3_ready && icacheRespAllValid || !f2_valid
  //TODO: addr compare may be timing critical
  val f2_icache_all_resp_wire = fromICache(0).valid && (fromICache(0).bits.vaddr === f2_ftq_req.startAddr) && ((fromICache(1).valid && (fromICache(1).bits.vaddr === f2_ftq_req.fallThruAddr)) || !f2_doubleLine)
  val f2_icache_all_resp_reg  = RegInit(false.B)

  icacheRespAllValid := f2_icache_all_resp_reg || f2_icache_all_resp_wire

  io.icacheStop := !f3_ready

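  // The icache response is a single-cycle pulse, but f3 may be stalled when it
  // arrives. f2_icache_all_resp_reg latches the pulse until f2 can actually
  // fire, so the response is not lost while the pipeline is stopped.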
  when(f2_flush)                                              {f2_icache_all_resp_reg := false.B}
  .elsewhen(f2_valid && f2_icache_all_resp_wire && !f3_ready) {f2_icache_all_resp_reg := true.B}
  .elsewhen(f2_fire && f2_icache_all_resp_reg)                {f2_icache_all_resp_reg := false.B}

  when(f2_flush)                  {f2_valid := false.B}
  .elsewhen(f1_fire && !f1_flush) {f2_valid := true.B}
  .elsewhen(f2_fire)              {f2_valid := false.B}

  val f2_cache_response_data = ResultHoldBypass(valid = f2_icache_all_resp_wire, data = VecInit(fromICache.map(_.bits.readData)))

  val f2_except_pf = VecInit((0 until PortNumber).map(i => fromICache(i).bits.tlbExcp.pageFault))
  val f2_except_af = VecInit((0 until PortNumber).map(i => fromICache(i).bits.tlbExcp.accessFault))
  val f2_mmio      = fromICache(0).bits.tlbExcp.mmio && !fromICache(0).bits.tlbExcp.accessFault &&
                                                        !fromICache(0).bits.tlbExcp.pageFault

  val f2_pc        = RegEnable(next = f1_pc,        enable = f1_fire)
  val f2_half_snpc = RegEnable(next = f1_half_snpc, enable = f1_fire)
  val f2_cut_ptr   = RegEnable(next = f1_cut_ptr,   enable = f1_fire)

  def isNextLine(pc: UInt, startAddr: UInt) = {
    startAddr(blockOffBits) ^ pc(blockOffBits)
  }

  def isLastInLine(pc: UInt) = {
    pc(blockOffBits - 1, 0) === "b111110".U // byte offset 62: the last 2-byte slot of a 64-byte cacheline
  }

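  // Instruction-range masks. Fill(PredictWidth, 1.U(1.W)) is an all-ones vector;
  // shifting it right by ~x (= PredictWidth-1 - x) keeps the low x+1 bits set.
  // So f2_jump_range keeps slots 0..ftqOffset when a taken branch is predicted
  // (all slots when there is none), and f2_ftr_range keeps the slots up to the
  // fall-through address (all slots for an oversize block). A worked example,
  // assuming PredictWidth = 16: ftqOffset = 3 gives 0xffff >> 12 = 0x000f,
  // i.e. slots 0..3 remain in the fetch packet.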
  val f2_foldpc = VecInit(f2_pc.map(i => XORFold(i(VAddrBits-1, 1), MemPredPCWidth)))
  val f2_jump_range = Fill(PredictWidth, !f2_ftq_req.ftqOffset.valid) | Fill(PredictWidth, 1.U(1.W)) >> ~f2_ftq_req.ftqOffset.bits
  val f2_ftr_range  = Fill(PredictWidth, f2_ftq_req.oversize) | Fill(PredictWidth, 1.U(1.W)) >> ~getBasicBlockIdx(f2_ftq_req.fallThruAddr, f2_ftq_req.startAddr)
  val f2_instr_range = f2_jump_range & f2_ftr_range
  val f2_pf_vec = VecInit((0 until PredictWidth).map(i => (!isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_except_pf(0) || isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_doubleLine && f2_except_pf(1))))
  val f2_af_vec = VecInit((0 until PredictWidth).map(i => (!isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_except_af(0) || isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_doubleLine && f2_except_af(1))))

  val f2_paddrs    = VecInit((0 until PortNumber).map(i => fromICache(i).bits.paddr))
  val f2_perf_info = io.icachePerfInfo

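  // cut() selects the fetch packet out of the two concatenated cachelines.
  // The doubled line is viewed as a vector of 16-bit half-words (32-bit words
  // without the C extension) and cutPtr(i) picks the i-th half-word starting
  // from the block offset of startAddr. A sketch, assuming 64-byte lines and
  // PredictWidth = 16: the 128 bytes become 64 half-words, and a fetch starting
  // at offset 0x3e reads half-words 31..47, crossing into the second line.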
  def cut(cacheline: UInt, cutPtr: Vec[UInt]): Vec[UInt] = {
    if(HasCExtension){
      val result  = Wire(Vec(PredictWidth + 1, UInt(16.W)))
      val dataVec = cacheline.asTypeOf(Vec(blockBytes * 2 / 2, UInt(16.W)))
      (0 until PredictWidth + 1).foreach( i =>
        result(i) := dataVec(cutPtr(i))
      )
      result
    } else {
      val result  = Wire(Vec(PredictWidth, UInt(32.W)))
      val dataVec = cacheline.asTypeOf(Vec(blockBytes * 2 / 4, UInt(32.W)))
      (0 until PredictWidth).foreach( i =>
        result(i) := dataVec(cutPtr(i))
      )
      result
    }
  }

  val f2_datas    = VecInit((0 until PortNumber).map(i => f2_cache_response_data(i)))
  val f2_cut_data = cut(Cat(f2_datas.map(cacheline => cacheline.asUInt).reverse).asUInt, f2_cut_ptr)

  /** predecode (includes the RVC expander) */
  preDecoderIn.data             := f2_cut_data
  preDecoderIn.frontendTrigger  := io.frontendTrigger
  preDecoderIn.csrTriggerEnable := io.csrTriggerEnable
  preDecoderIn.pc               := f2_pc

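  // Predecoder outputs: RVC-expanded instructions, per-slot predecode info
  // (branch type, RVC flag), jump offsets, and hasHalfValid, the per-slot valid
  // vector used when this block starts with the upper half of a previous RVI.
  // f2_crossPageFault flags an RVI whose lower half sits in the fault-free
  // first cacheline's last 2 bytes while its upper half lies in a page-faulting
  // second line; it feeds crossPageIPFFix downstream.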
  val f2_expd_instr     = preDecoderOut.expInstr
  val f2_pd             = preDecoderOut.pd
  val f2_jump_offset    = preDecoderOut.jumpOffset
  val f2_hasHalfValid   = preDecoderOut.hasHalfValid
  val f2_crossPageFault = VecInit((0 until PredictWidth).map(i => isLastInLine(f2_pc(i)) && !f2_except_pf(0) && f2_doubleLine && f2_except_pf(1) && !f2_pd(i).isRVC))

  val predecodeOutValid = WireInit(false.B)

  /**
    ******************************************************************************
    * IFU Stage 3
    * - handle MMIO instruction
    *  - send request to the Uncache fetch unit
    *  - every fetch packet contains exactly 1 MMIO instruction
    *  - MMIO instructions stop the fetch pipeline until they commit from the ROB
    *  - flush to snpc (send ifu_redirect to Ftq)
    * - Ibuffer enqueue
    * - check prediction results in the Frontend (jalFault/retFault/notCFIFault/invalidTakenFault/targetFault)
    * - handle last half RVI instruction
    ******************************************************************************
    */

  val f3_valid          = RegInit(false.B)
  val f3_ftq_req        = RegEnable(next = f2_ftq_req,    enable = f2_fire)
  val f3_situation      = RegEnable(next = f2_situation,  enable = f2_fire)
  val f3_doubleLine     = RegEnable(next = f2_doubleLine, enable = f2_fire)
  val f3_fire           = io.toIbuffer.fire()

  // Note: overridden by the MMIO-aware assignment to f3_ready further below
  // (Chisel last-connect semantics).
  f3_ready := io.toIbuffer.ready || !f3_valid

  val f3_cut_data       = RegEnable(next = f2_cut_data, enable = f2_fire)

  val f3_except_pf      = RegEnable(next = f2_except_pf, enable = f2_fire)
  val f3_except_af      = RegEnable(next = f2_except_af, enable = f2_fire)
  val f3_mmio           = RegEnable(next = f2_mmio,      enable = f2_fire)

  val f3_expd_instr     = RegEnable(next = f2_expd_instr,     enable = f2_fire)
  val f3_pd             = RegEnable(next = f2_pd,             enable = f2_fire)
  val f3_jump_offset    = RegEnable(next = f2_jump_offset,    enable = f2_fire)
  val f3_af_vec         = RegEnable(next = f2_af_vec,         enable = f2_fire)
  val f3_pf_vec         = RegEnable(next = f2_pf_vec,         enable = f2_fire)
  val f3_pc             = RegEnable(next = f2_pc,             enable = f2_fire)
  val f3_half_snpc      = RegEnable(next = f2_half_snpc,      enable = f2_fire)
  val f3_instr_range    = RegEnable(next = f2_instr_range,    enable = f2_fire)
  val f3_foldpc         = RegEnable(next = f2_foldpc,         enable = f2_fire)
  val f3_crossPageFault = RegEnable(next = f2_crossPageFault, enable = f2_fire)
  val f3_hasHalfValid   = RegEnable(next = f2_hasHalfValid,   enable = f2_fire)
  val f3_except         = VecInit((0 until 2).map{i => f3_except_pf(i) || f3_except_af(i)})
  val f3_has_except     = f3_valid && (f3_except_af.reduce(_||_) || f3_except_pf.reduce(_||_))
  val f3_pAddrs         = RegEnable(next = f2_paddrs, enable = f2_fire)

  val f3_oversize_target = f3_pc.last + 2.U

  /*** MMIO state machine ***/
  val f3_mmio_data = Reg(UInt(maxInstrLen.W))

  val mmio_idle :: mmio_send_req :: mmio_w_resp :: mmio_resend :: mmio_resend_w_resp :: mmio_wait_commit :: mmio_commited :: Nil = Enum(7)
  val mmio_state = RegInit(mmio_idle)
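  // State flow: idle -> send_req -> w_resp [-> resend -> resend_w_resp]
  //             -> wait_commit -> commited -> idle.
  // The resend states issue a second uncache request at f3_pAddrs(0) + 2 (see
  // toUncache.bits.addr below); every response is latched into f3_mmio_data.
  // The single MMIO instruction is then held in f3 until the ROB commits it
  // (mmio_commit), after which the IFU redirects to the sequential next PC.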

  val f3_req_is_mmio     = f3_mmio && f3_valid
  val mmio_commit = VecInit(io.rob_commits.map{commit => commit.valid && commit.bits.ftqIdx === f3_ftq_req.ftqIdx && commit.bits.ftqOffset === 0.U}).asUInt.orR
  val f3_mmio_req_commit = f3_req_is_mmio && mmio_state === mmio_commited

  val f3_mmio_to_commit      = f3_req_is_mmio && mmio_state === mmio_wait_commit
  val f3_mmio_to_commit_next = RegNext(f3_mmio_to_commit)
  val f3_mmio_can_go         = f3_mmio_to_commit && !f3_mmio_to_commit_next

  val f3_ftq_flush_self     = fromFtq.redirect.valid && RedirectLevel.flushItself(fromFtq.redirect.bits.level)
  val f3_ftq_flush_by_older = fromFtq.redirect.valid && isBefore(fromFtq.redirect.bits.ftqIdx, f3_ftq_req.ftqIdx)

  val f3_need_not_flush = f3_req_is_mmio && fromFtq.redirect.valid && !f3_ftq_flush_self && !f3_ftq_flush_by_older

  when(f3_flush && !f3_need_not_flush)              {f3_valid := false.B}
  .elsewhen(f2_fire && !f2_flush)                   {f3_valid := true.B}
  .elsewhen(io.toIbuffer.fire() && !f3_req_is_mmio) {f3_valid := false.B}
  .elsewhen(f3_req_is_mmio && f3_mmio_req_commit)   {f3_valid := false.B}

  val f3_mmio_use_seq_pc = RegInit(false.B)

  val (redirect_ftqIdx, redirect_ftqOffset) = (fromFtq.redirect.bits.ftqIdx, fromFtq.redirect.bits.ftqOffset)
  val redirect_mmio_req = fromFtq.redirect.valid && redirect_ftqIdx === f3_ftq_req.ftqIdx && redirect_ftqOffset === 0.U

  when(RegNext(f2_fire && !f2_flush) && f3_req_is_mmio) { f3_mmio_use_seq_pc := true.B  }
  .elsewhen(redirect_mmio_req)                          { f3_mmio_use_seq_pc := false.B }

  f3_ready := Mux(f3_req_is_mmio, io.toIbuffer.ready && f3_mmio_req_commit || !f3_valid, io.toIbuffer.ready || !f3_valid)

  when(fromUncache.fire()) {f3_mmio_data := fromUncache.bits.data}


  switch(mmio_state){
    is(mmio_idle){
      when(f3_req_is_mmio){
        mmio_state := mmio_send_req
      }
    }

    is(mmio_send_req){
      mmio_state := Mux(toUncache.fire(), mmio_w_resp, mmio_send_req)
    }

    is(mmio_w_resp){
      when(fromUncache.fire()){
        val isRVC = fromUncache.bits.data(1,0) =/= 3.U
        mmio_state := Mux(isRVC, mmio_resend, mmio_wait_commit)
      }
    }

    is(mmio_resend){
      mmio_state := Mux(toUncache.fire(), mmio_resend_w_resp, mmio_resend)
    }

    is(mmio_resend_w_resp){
      when(fromUncache.fire()){
        mmio_state := mmio_wait_commit
      }
    }

    is(mmio_wait_commit){
      when(mmio_commit){
        mmio_state := mmio_commited
      }
    }

    is(mmio_commited){
      mmio_state := mmio_idle
    }
  }

  when(f3_ftq_flush_self || f3_ftq_flush_by_older){
    mmio_state := mmio_idle
    f3_mmio_data := 0.U
  }

  toUncache.valid     := ((mmio_state === mmio_send_req) || (mmio_state === mmio_resend)) && f3_req_is_mmio
  toUncache.bits.addr := Mux((mmio_state === mmio_resend), f3_pAddrs(0) + 2.U, f3_pAddrs(0))
  fromUncache.ready   := true.B


  val f3_lastHalf = RegInit(0.U.asTypeOf(new LastHalfInfo))

  val f3_predecode_range = VecInit(preDecoderOut.pd.map(inst => inst.valid)).asUInt
  val f3_mmio_range      = VecInit((0 until PredictWidth).map(i => if(i == 0) true.B else false.B))
  val f3_instr_valid     = Wire(Vec(PredictWidth, Bool()))

  /*** prediction result check ***/
  checkerIn.ftqOffset   := f3_ftq_req.ftqOffset
  checkerIn.jumpOffset  := f3_jump_offset
  checkerIn.target      := f3_ftq_req.target
  checkerIn.instrRange  := f3_instr_range.asTypeOf(Vec(PredictWidth, Bool()))
  checkerIn.instrValid  := f3_instr_valid.asTypeOf(Vec(PredictWidth, Bool()))
  checkerIn.pds         := f3_pd
  checkerIn.pc          := f3_pc

  /*** handle half RVI in the last 2 bytes ***/

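  // If the final 2-byte slot holds the start of an RVI (valid, in range, not
  // taken, not mispredicted, not MMIO/oversize), its upper half belongs to the
  // next fetch block. f3_lastHalf records the fall-through address as middlePC;
  // when the next block starts exactly there, its first slot is treated as the
  // upper half (f3_hasHalfValid supplies the per-slot valids for that case).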
  def hasLastHalf(idx: UInt) = {
    !f3_pd(idx).isRVC && checkerOut.fixedRange(idx) && f3_instr_valid(idx) && !checkerOut.fixedTaken(idx) && !checkerOut.fixedMissPred(idx) && !f3_req_is_mmio && !f3_ftq_req.oversize
  }

  // Index of the last in-range slot: for a log2Ceil(PredictWidth)-bit index x,
  // ~x equals PredictWidth-1 - x, so inverting the priority encoder of the
  // reversed range vector yields the position of its highest set bit.
  val f3_last_validIdx = ~ParallelPriorityEncoder(checkerOut.fixedRange.reverse)

  val f3_hasLastHalf    = hasLastHalf((PredictWidth - 1).U)
  val f3_false_lastHalf = hasLastHalf(f3_last_validIdx)
  val f3_false_snpc     = f3_half_snpc(f3_last_validIdx)

  val f3_lastHalf_mask = VecInit((0 until PredictWidth).map(i => if(i == 0) false.B else true.B)).asUInt()

  when (f3_flush) {
    f3_lastHalf.valid := false.B
  }.elsewhen (f3_fire) {
    f3_lastHalf.valid := f3_hasLastHalf
    f3_lastHalf.middlePC := f3_ftq_req.fallThruAddr
  }

  f3_instr_valid := Mux(f3_lastHalf.valid, f3_hasHalfValid, VecInit(f3_pd.map(inst => inst.valid)))

  /*** frontend trigger ***/
  frontendTrigger.io.pds  := f3_pd
  frontendTrigger.io.pc   := f3_pc
  frontendTrigger.io.data := f3_cut_data

  frontendTrigger.io.frontendTrigger  := io.frontendTrigger
  frontendTrigger.io.csrTriggerEnable := io.csrTriggerEnable

  val f3_triggered = frontendTrigger.io.triggered

  /*** send to Ibuffer ***/

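  // enqEnable marks the instructions that actually enter the Ibuffer: they must
  // be inside the checker-fixed range and predecode-valid. When the previous
  // block ended with half an RVI (f3_lastHalf.valid), slot 0 holds only the
  // upper half of that instruction, so it is masked off via lastHalfMask below.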
  io.toIbuffer.valid            := f3_valid && (!f3_req_is_mmio || f3_mmio_can_go) && !f3_flush
  io.toIbuffer.bits.instrs      := f3_expd_instr
  io.toIbuffer.bits.valid       := f3_instr_valid.asUInt
  io.toIbuffer.bits.enqEnable   := checkerOut.fixedRange.asUInt & f3_instr_valid.asUInt
  io.toIbuffer.bits.pd          := f3_pd
  io.toIbuffer.bits.ftqPtr      := f3_ftq_req.ftqIdx
  io.toIbuffer.bits.pc          := f3_pc
  io.toIbuffer.bits.ftqOffset.zipWithIndex.map{case(a, i) => a.bits := i.U; a.valid := checkerOut.fixedTaken(i) && !f3_req_is_mmio}
  io.toIbuffer.bits.foldpc      := f3_foldpc
  io.toIbuffer.bits.ipf         := f3_pf_vec
  io.toIbuffer.bits.acf         := f3_af_vec
  io.toIbuffer.bits.crossPageIPFFix := f3_crossPageFault
  io.toIbuffer.bits.triggered   := f3_triggered

  val lastHalfMask = VecInit((0 until PredictWidth).map(i => if(i == 0) false.B else true.B))
  when(f3_lastHalf.valid){
    io.toIbuffer.bits.enqEnable := checkerOut.fixedRange.asUInt & f3_instr_valid.asUInt & lastHalfMask.asUInt
    io.toIbuffer.bits.valid     := f3_lastHalf_mask & f3_instr_valid.asUInt
  }


  /** external predecode for MMIO instruction */
  when(f3_req_is_mmio){
    val inst = Cat(f3_mmio_data(31,16), f3_mmio_data(15,0))
    val currentIsRVC = isRVC(inst)

    val brType::isCall::isRet::Nil = brInfo(inst)
    val jalOffset = jal_offset(inst, currentIsRVC)
    val brOffset  = br_offset(inst, currentIsRVC)

    io.toIbuffer.bits.instrs(0) := new RVCDecoder(inst, XLEN).decode.bits

    io.toIbuffer.bits.pd(0).valid  := true.B
    io.toIbuffer.bits.pd(0).isRVC  := currentIsRVC
    io.toIbuffer.bits.pd(0).brType := brType
    io.toIbuffer.bits.pd(0).isCall := isCall
    io.toIbuffer.bits.pd(0).isRet  := isRet

    io.toIbuffer.bits.enqEnable := f3_mmio_range.asUInt
  }

  // Write back to Ftq
  val f3_cache_fetch = f3_valid && !(f2_fire && !f2_flush)
  val finishFetchMaskReg = RegNext(f3_cache_fetch)

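  // For an MMIO packet the IFU itself redirects the Ftq: once the uncache
  // response is back, write back a misprediction at offset 0 whose target is
  // the sequential next PC (startAddr + 2 or + 4 depending on RVC), so fetch
  // resumes only after the MMIO instruction commits.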
  val mmioFlushWb = Wire(Valid(new PredecodeWritebackBundle))
  val f3_mmio_missOffset = Wire(ValidUndirectioned(UInt(log2Ceil(PredictWidth).W)))
  f3_mmio_missOffset.valid := f3_req_is_mmio
  f3_mmio_missOffset.bits  := 0.U

  mmioFlushWb.valid           := (f3_req_is_mmio && mmio_state === mmio_wait_commit && RegNext(fromUncache.fire()) && f3_mmio_use_seq_pc)
  mmioFlushWb.bits.pc         := f3_pc
  mmioFlushWb.bits.pd         := f3_pd
  mmioFlushWb.bits.pd.zipWithIndex.map{case(instr,i) => instr.valid := f3_mmio_range(i)}
  mmioFlushWb.bits.ftqIdx     := f3_ftq_req.ftqIdx
  mmioFlushWb.bits.ftqOffset  := f3_ftq_req.ftqOffset.bits
  mmioFlushWb.bits.misOffset  := f3_mmio_missOffset
  mmioFlushWb.bits.cfiOffset  := DontCare
  mmioFlushWb.bits.target     := Mux((f3_mmio_data(1,0) =/= 3.U), f3_ftq_req.startAddr + 2.U, f3_ftq_req.startAddr + 4.U)
  mmioFlushWb.bits.jalTarget  := DontCare
  mmioFlushWb.bits.instrRange := f3_mmio_range

  mmio_redirect := (f3_req_is_mmio && mmio_state === mmio_wait_commit && RegNext(fromUncache.fire()) && f3_mmio_use_seq_pc)

  /**
    ******************************************************************************
    * IFU Write Back Stage
    * - write back predecode information to the Ftq to update its entries
    * - redirect if a faulty prediction is found
    * - redirect on a false-hit last half (the last PC is not start + 32 bytes,
    *   but falls in the middle of a non-CFI RVI instruction)
    ******************************************************************************
    */

  val wb_valid   = RegNext(RegNext(f2_fire && !f2_flush) && !f3_req_is_mmio && !f3_flush)
  val wb_ftq_req = RegNext(f3_ftq_req)

  val wb_check_result = RegNext(checkerOut)
  val wb_instr_range  = RegNext(io.toIbuffer.bits.enqEnable)
  val wb_pc           = RegNext(f3_pc)
  val wb_pd           = RegNext(f3_pd)
  val wb_instr_valid  = RegNext(f3_instr_valid)

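  // A "false lastHalf": the last instruction inside the checker-fixed range is
  // an RVI whose upper half falls outside that range, and it is not in the
  // final slot, so the usual f3_lastHalf mechanism cannot patch it. The write
  // back stage then redirects fetch to that instruction's sequential next PC
  // (wb_half_target = half_snpc of the last valid slot).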
  /* false hit lastHalf */
  val wb_lastIdx        = RegNext(f3_last_validIdx)
  val wb_false_lastHalf = RegNext(f3_false_lastHalf) && wb_lastIdx =/= (PredictWidth - 1).U
  val wb_false_target   = RegNext(f3_false_snpc)

  val wb_half_flush  = wb_false_lastHalf
  val wb_half_target = wb_false_target

  /* false oversize */
  val lastIsRVC = wb_instr_range.asTypeOf(Vec(PredictWidth, Bool())).last && wb_pd.last.isRVC
  val lastIsRVI = wb_instr_range.asTypeOf(Vec(PredictWidth, Bool()))(PredictWidth - 2) && !wb_pd(PredictWidth - 2).isRVC
  val lastTaken = wb_check_result.fixedTaken.last
  val wb_false_oversize  = wb_valid && wb_ftq_req.oversize && (lastIsRVC || lastIsRVI) && !lastTaken
  val wb_oversize_target = RegNext(f3_oversize_target)

  when(wb_valid){
    assert(!wb_false_oversize || !wb_half_flush, "False oversize and false half should be exclusive.")
  }


  f3_wb_not_flush := wb_ftq_req.ftqIdx === f3_ftq_req.ftqIdx && f3_valid && wb_valid

  val checkFlushWb = Wire(Valid(new PredecodeWritebackBundle))
  checkFlushWb.valid                := wb_valid
  checkFlushWb.bits.pc              := wb_pc
  checkFlushWb.bits.pd              := wb_pd
  checkFlushWb.bits.pd.zipWithIndex.map{case(instr,i) => instr.valid := wb_instr_valid(i)}
  checkFlushWb.bits.ftqIdx          := wb_ftq_req.ftqIdx
  checkFlushWb.bits.ftqOffset       := wb_ftq_req.ftqOffset.bits
  checkFlushWb.bits.misOffset.valid := ParallelOR(wb_check_result.fixedMissPred) || wb_half_flush || wb_false_oversize
  checkFlushWb.bits.misOffset.bits  := Mux(wb_half_flush, (PredictWidth - 1).U, ParallelPriorityEncoder(wb_check_result.fixedMissPred))
  checkFlushWb.bits.cfiOffset.valid := ParallelOR(wb_check_result.fixedTaken)
  checkFlushWb.bits.cfiOffset.bits  := ParallelPriorityEncoder(wb_check_result.fixedTaken)
  checkFlushWb.bits.target          := Mux(wb_false_oversize, wb_oversize_target,
                                         Mux(wb_half_flush, wb_half_target, wb_check_result.fixedTarget(ParallelPriorityEncoder(wb_check_result.fixedMissPred))))
  checkFlushWb.bits.jalTarget       := wb_check_result.fixedTarget(ParallelPriorityEncoder(VecInit(wb_pd.map{pd => pd.isJal})))
  checkFlushWb.bits.instrRange      := wb_instr_range.asTypeOf(Vec(PredictWidth, Bool()))

  toFtq.pdWb := Mux(f3_req_is_mmio, mmioFlushWb, checkFlushWb)

  wb_redirect := checkFlushWb.bits.misOffset.valid && wb_valid

  /** performance counters */
  val f3_perf_info = RegEnable(next = f2_perf_info, enable = f2_fire)
  val f3_req_0     = io.toIbuffer.fire()
  val f3_req_1     = io.toIbuffer.fire() && f3_doubleLine
  val f3_hit_0     = io.toIbuffer.fire() && f3_perf_info.bank_hit(0)
  val f3_hit_1     = io.toIbuffer.fire() && f3_doubleLine && f3_perf_info.bank_hit(1)
  val f3_hit       = f3_perf_info.hit
  val perfEvents = Seq(
    ("frontendFlush                ", wb_redirect                              ),
    ("ifu_req                      ", io.toIbuffer.fire()                      ),
    ("ifu_miss                     ", io.toIbuffer.fire() && !f3_perf_info.hit ),
    ("ifu_req_cacheline_0          ", f3_req_0                                 ),
    ("ifu_req_cacheline_1          ", f3_req_1                                 ),
    ("ifu_req_cacheline_0_hit      ", f3_hit_0                                 ),
    ("ifu_req_cacheline_1_hit      ", f3_hit_1                                 ),
    ("only_0_hit                   ", f3_perf_info.only_0_hit    && io.toIbuffer.fire() ),
    ("only_0_miss                  ", f3_perf_info.only_0_miss   && io.toIbuffer.fire() ),
    ("hit_0_hit_1                  ", f3_perf_info.hit_0_hit_1   && io.toIbuffer.fire() ),
    ("hit_0_miss_1                 ", f3_perf_info.hit_0_miss_1  && io.toIbuffer.fire() ),
    ("miss_0_hit_1                 ", f3_perf_info.miss_0_hit_1  && io.toIbuffer.fire() ),
    ("miss_0_miss_1                ", f3_perf_info.miss_0_miss_1 && io.toIbuffer.fire() ),
    ("cross_line_block             ", io.toIbuffer.fire() && f3_situation(0)   ),
    ("fall_through_is_cacheline_end", io.toIbuffer.fire() && f3_situation(1)   )
  )
  generatePerfEvent()

  XSPerfAccumulate("ifu_req",  io.toIbuffer.fire())
  XSPerfAccumulate("ifu_miss", io.toIbuffer.fire() && !f3_hit)
  XSPerfAccumulate("ifu_req_cacheline_0", f3_req_0)
  XSPerfAccumulate("ifu_req_cacheline_1", f3_req_1)
  XSPerfAccumulate("ifu_req_cacheline_0_hit", f3_hit_0)
  XSPerfAccumulate("ifu_req_cacheline_1_hit", f3_hit_1)
  XSPerfAccumulate("frontendFlush", wb_redirect)
  XSPerfAccumulate("only_0_hit",    f3_perf_info.only_0_hit    && io.toIbuffer.fire())
  XSPerfAccumulate("only_0_miss",   f3_perf_info.only_0_miss   && io.toIbuffer.fire())
  XSPerfAccumulate("hit_0_hit_1",   f3_perf_info.hit_0_hit_1   && io.toIbuffer.fire())
  XSPerfAccumulate("hit_0_miss_1",  f3_perf_info.hit_0_miss_1  && io.toIbuffer.fire())
  XSPerfAccumulate("miss_0_hit_1",  f3_perf_info.miss_0_hit_1  && io.toIbuffer.fire())
  XSPerfAccumulate("miss_0_miss_1", f3_perf_info.miss_0_miss_1 && io.toIbuffer.fire())
  XSPerfAccumulate("cross_line_block", io.toIbuffer.fire() && f3_situation(0))
  XSPerfAccumulate("fall_through_is_cacheline_end", io.toIbuffer.fire() && f3_situation(1))
}