xref: /XiangShan/src/main/scala/xiangshan/frontend/IFU.scala (revision 2dfa9e7608858a060c152f9a99efd1629db38447)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.RVCDecoder
import xiangshan._
import xiangshan.cache.mmu._
import xiangshan.frontend.icache._
import utils._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}

trait HasInstrMMIOConst extends HasXSParameter with HasIFUConst {
  def mmioBusWidth = 64
  def mmioBusBytes = mmioBusWidth / 8
  def maxInstrLen = 32
}

trait HasIFUConst extends HasXSParameter {
  def addrAlign(addr: UInt, bytes: Int, highest: Int): UInt = Cat(addr(highest-1, log2Ceil(bytes)), 0.U(log2Ceil(bytes).W))
  def fetchQueueSize = 2

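  // Returns the index (in instruction slots) of the last instruction that starts
  // strictly before `pc` in the block beginning at `start`, i.e.
  // ((pc - start) - instBytes) >> instOffsetBits.
  // Worked example (assuming the C extension, so instBytes = 2): with
  // start = 0x1000 and pc = 0x1008, byteOffset = 8 and the result is
  // (8 - 2) >> 1 = 3, the slot of the last instruction before 0x1008.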
  def getBasicBlockIdx(pc: UInt, start: UInt): UInt = {
    val byteOffset = pc - start
    (byteOffset - instBytes.U)(log2Ceil(PredictWidth), instOffsetBits)
  }
}

class IfuToFtqIO(implicit p: Parameters) extends XSBundle {
  val pdWb = Valid(new PredecodeWritebackBundle)
}

class FtqInterface(implicit p: Parameters) extends XSBundle {
  val fromFtq = Flipped(new FtqToIfuIO)
  val toFtq   = new IfuToFtqIO
}

class UncacheInterface(implicit p: Parameters) extends XSBundle {
  val fromUncache = Flipped(DecoupledIO(new InsUncacheResp))
  val toUncache   = DecoupledIO(new InsUncacheReq)
}

class NewIFUIO(implicit p: Parameters) extends XSBundle {
  val ftqInter         = new FtqInterface
  val icacheInter      = Flipped(new IFUICacheIO)
  val icacheStop       = Output(Bool())
  val icachePerfInfo   = Input(new ICachePerfInfo)
  val toIbuffer        = Decoupled(new FetchToIBuffer)
  val uncacheInter     = new UncacheInterface
  val frontendTrigger  = Flipped(new FrontendTdataDistributeIO)
  val csrTriggerEnable = Input(Vec(4, Bool()))
  val rob_commits      = Flipped(Vec(CommitWidth, Valid(new RobCommitInfo)))
  val iTLBInter        = new TlbRequestIO
  val pmp              = new ICachePMPBundle
}

// record the situation in which fallThruAddr falls into
// the middle of an RVI inst
class LastHalfInfo(implicit p: Parameters) extends XSBundle {
  val valid = Bool()
  val middlePC = UInt(VAddrBits.W)
  def matchThisBlock(startAddr: UInt) = valid && middlePC === startAddr
}

class IfuToPreDecode(implicit p: Parameters) extends XSBundle {
  val data             = if (HasCExtension) Vec(PredictWidth + 1, UInt(16.W)) else Vec(PredictWidth, UInt(32.W))
  val frontendTrigger  = new FrontendTdataDistributeIO
  val csrTriggerEnable = Vec(4, Bool())
  val pc               = Vec(PredictWidth, UInt(VAddrBits.W))
}

class IfuToPredChecker(implicit p: Parameters) extends XSBundle {
  val ftqOffset  = Valid(UInt(log2Ceil(PredictWidth).W))
  val jumpOffset = Vec(PredictWidth, UInt(XLEN.W))
  val target     = UInt(VAddrBits.W)
  val instrRange = Vec(PredictWidth, Bool())
  val instrValid = Vec(PredictWidth, Bool())
  val pds        = Vec(PredictWidth, new PreDecodeInfo)
  val pc         = Vec(PredictWidth, UInt(VAddrBits.W))
}

class NewIFU(implicit p: Parameters) extends XSModule
  with HasICacheParameters
  with HasIFUConst
  with HasPdConst
  with HasCircularQueuePtrHelper
  with HasPerfEvents
{
  val io = IO(new NewIFUIO)
  val (toFtq, fromFtq)    = (io.ftqInter.toFtq, io.ftqInter.fromFtq)
  val fromICache = io.icacheInter.resp
  val (toUncache, fromUncache) = (io.uncacheInter.toUncache, io.uncacheInter.fromUncache)

  def isCrossLineReq(start: UInt, end: UInt): Bool = start(blockOffBits) ^ end(blockOffBits)

  def isLastInCacheline(addr: UInt): Bool = addr(blockOffBits - 1, 1) === 0.U

  class TlbExept(implicit p: Parameters) extends XSBundle {
    val pageFault = Bool()
    val accessFault = Bool()
    val mmio = Bool()
  }

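  // Four predecoders are instantiated, one per combination of the two ICache
  // ports' data sources (register bypass vs. SRAM, see f2_data_2_cacheline
  // below), so predecode can start before the select bits settle and the late
  // selection only steers a Mux1H over the four finished results.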
  val preDecoders       = Seq.fill(4){ Module(new PreDecode) }

  val predChecker     = Module(new PredChecker)
  val frontendTrigger = Module(new FrontendTrigger)
  val (checkerIn, checkerOutStage1, checkerOutStage2) = (predChecker.io.in, predChecker.io.out.stage1Out, predChecker.io.out.stage2Out)

  io.iTLBInter.req_kill := false.B
  io.iTLBInter.resp.ready := true.B

  /**
    ******************************************************************************
    * IFU Stage 0
    * - send cacheline fetch request to ICacheMainPipe
    ******************************************************************************
    */


  val f0_valid                             = fromFtq.req.valid
  val f0_ftq_req                           = fromFtq.req.bits
  val f0_doubleLine                        = fromFtq.req.bits.crossCacheline
  val f0_vSetIdx                           = VecInit(get_idx(f0_ftq_req.startAddr), get_idx(f0_ftq_req.nextlineStart))
  val f0_fire                              = fromFtq.req.fire()

  val f0_flush, f1_flush, f2_flush, f3_flush = WireInit(false.B)
  val from_bpu_f0_flush, from_bpu_f1_flush, from_bpu_f2_flush, from_bpu_f3_flush = WireInit(false.B)

  from_bpu_f0_flush := fromFtq.flushFromBpu.shouldFlushByStage2(f0_ftq_req.ftqIdx) ||
                       fromFtq.flushFromBpu.shouldFlushByStage3(f0_ftq_req.ftqIdx)

  val wb_redirect, mmio_redirect, backend_redirect = WireInit(false.B)
  val f3_wb_not_flush = WireInit(false.B)

  backend_redirect := fromFtq.redirect.valid
  f3_flush := backend_redirect || (wb_redirect && !f3_wb_not_flush)
  f2_flush := backend_redirect || mmio_redirect || wb_redirect
  f1_flush := f2_flush || from_bpu_f1_flush
  f0_flush := f1_flush || from_bpu_f0_flush

  val f1_ready, f2_ready, f3_ready         = WireInit(false.B)

  fromFtq.req.ready := f1_ready && io.icacheInter.icacheReady

  /** <PERF> f0 fetch bubble */

  XSPerfAccumulate("fetch_bubble_ftq_not_valid",   !fromFtq.req.valid && fromFtq.req.ready  )
  // XSPerfAccumulate("fetch_bubble_pipe_stall",    f0_valid && toICache(0).ready && toICache(1).ready && !f1_ready )
  // XSPerfAccumulate("fetch_bubble_icache_0_busy",   f0_valid && !toICache(0).ready  )
  // XSPerfAccumulate("fetch_bubble_icache_1_busy",   f0_valid && !toICache(1).ready  )
  XSPerfAccumulate("fetch_flush_backend_redirect",   backend_redirect  )
  XSPerfAccumulate("fetch_flush_wb_redirect",    wb_redirect  )
  XSPerfAccumulate("fetch_flush_bpu_f1_flush",   from_bpu_f1_flush  )
  XSPerfAccumulate("fetch_flush_bpu_f0_flush",   from_bpu_f0_flush  )


  /**
    ******************************************************************************
    * IFU Stage 1
    * - calculate pc/half_snpc/cut_ptr for every instruction
    ******************************************************************************
    */

  val f1_valid      = RegInit(false.B)
  val f1_ftq_req    = RegEnable(f0_ftq_req,    f0_fire)
  // val f1_situation  = RegEnable(f0_situation,  f0_fire)
  val f1_doubleLine = RegEnable(f0_doubleLine, f0_fire)
  val f1_vSetIdx    = RegEnable(f0_vSetIdx,    f0_fire)
  val f1_fire       = f1_valid && f2_ready

  f1_ready := f1_fire || !f1_valid

  from_bpu_f1_flush := fromFtq.flushFromBpu.shouldFlushByStage3(f1_ftq_req.ftqIdx) && f1_valid
  // from_bpu_f1_flush := false.B

  when(f1_flush)                  {f1_valid  := false.B}
  .elsewhen(f0_fire && !f0_flush) {f1_valid  := true.B}
  .elsewhen(f1_fire)              {f1_valid  := false.B}

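  // f1_pc:        virtual address of every (16-bit) instruction slot
  // f1_half_snpc: sequential next PC for the slot if it holds an RVI
  //               instruction (slot start + 4 Bytes)
  // f1_cut_ptr:   16-bit-parcel index of every slot inside the concatenated
  //               cacheline data, consumed by cut() in stage 2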
  val f1_pc                 = VecInit((0 until PredictWidth).map(i => f1_ftq_req.startAddr + (i * 2).U))
  val f1_half_snpc          = VecInit((0 until PredictWidth).map(i => f1_ftq_req.startAddr + ((i+2) * 2).U))
  val f1_cut_ptr            = if (HasCExtension) VecInit((0 until PredictWidth + 1).map(i => Cat(0.U(1.W), f1_ftq_req.startAddr(blockOffBits-1, 1)) + i.U))
                              else               VecInit((0 until PredictWidth).map(i =>     Cat(0.U(1.W), f1_ftq_req.startAddr(blockOffBits-1, 2)) + i.U))

  /**
    ******************************************************************************
    * IFU Stage 2
    * - icache response data (latched for pipeline stop)
    * - generate exception bits for every instruction (page fault/access fault/mmio)
    * - generate predicted instruction range (1 means this instruction is in this fetch packet)
    * - cut data from cachelines into packed instruction code
    * - instruction predecode (RVC expansion happens in stage 3)
    ******************************************************************************
    */

  val icacheRespAllValid = WireInit(false.B)

  val f2_valid      = RegInit(false.B)
  val f2_ftq_req    = RegEnable(f1_ftq_req,    f1_fire)
  // val f2_situation  = RegEnable(f1_situation,  f1_fire)
  val f2_doubleLine = RegEnable(f1_doubleLine, f1_fire)
  val f2_vSetIdx    = RegEnable(f1_vSetIdx,    f1_fire)
  val f2_fire       = f2_valid && f3_ready && icacheRespAllValid

  f2_ready := f2_fire || !f2_valid
  //TODO: addr compare may be timing critical
  val f2_icache_all_resp_wire = fromICache(0).valid && (fromICache(0).bits.vaddr === f2_ftq_req.startAddr) && ((fromICache(1).valid && (fromICache(1).bits.vaddr === f2_ftq_req.nextlineStart)) || !f2_doubleLine)
  val f2_icache_all_resp_reg  = RegInit(false.B)

  icacheRespAllValid := f2_icache_all_resp_reg || f2_icache_all_resp_wire

  io.icacheStop := !f3_ready

  when(f2_flush)                                              {f2_icache_all_resp_reg := false.B}
  .elsewhen(f2_valid && f2_icache_all_resp_wire && !f3_ready) {f2_icache_all_resp_reg := true.B}
  .elsewhen(f2_fire && f2_icache_all_resp_reg)                {f2_icache_all_resp_reg := false.B}

  when(f2_flush)                  {f2_valid := false.B}
  .elsewhen(f1_fire && !f1_flush) {f2_valid := true.B }
  .elsewhen(f2_fire)              {f2_valid := false.B}

  // val f2_cache_response_data = ResultHoldBypass(valid = f2_icache_all_resp_wire, data = VecInit(fromICache.map(_.bits.readData)))
  val f2_cache_response_reg_data  = VecInit(fromICache.map(_.bits.registerData))
  val f2_cache_response_sram_data = VecInit(fromICache.map(_.bits.sramData))
  val f2_cache_response_select    = VecInit(fromICache.map(_.bits.select))


  val f2_except_pf    = VecInit((0 until PortNumber).map(i => fromICache(i).bits.tlbExcp.pageFault))
  val f2_except_af    = VecInit((0 until PortNumber).map(i => fromICache(i).bits.tlbExcp.accessFault))
  val f2_mmio         = fromICache(0).bits.tlbExcp.mmio && !fromICache(0).bits.tlbExcp.accessFault &&
                                                           !fromICache(0).bits.tlbExcp.pageFault

  val f2_pc               = RegEnable(f1_pc,         f1_fire)
  val f2_half_snpc        = RegEnable(f1_half_snpc,  f1_fire)
  val f2_cut_ptr          = RegEnable(f1_cut_ptr,    f1_fire)

  val f2_resend_vaddr     = RegEnable(f1_ftq_req.startAddr + 2.U,  f1_fire)

  def isNextLine(pc: UInt, startAddr: UInt) = {
    startAddr(blockOffBits) ^ pc(blockOffBits)
  }

  def isLastInLine(pc: UInt) = {
    pc(blockOffBits - 1, 0) === "b111110".U
  }

  val f2_foldpc = VecInit(f2_pc.map(i => XORFold(i(VAddrBits-1, 1), MemPredPCWidth)))
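  // f2_jump_range masks off slots after a predicted-taken branch: when
  // ftqOffset is valid, only slots 0 to ftqOffset stay enabled; otherwise all do.
  // f2_ftr_range masks slots at or beyond the predicted fall-through address
  // when no taken branch was predicted. Worked example (e.g. PredictWidth = 16,
  // predicted-taken branch at slot 5): Fill(16, 1.U) >> ~5.U(4.W) shifts 16
  // ones right by 10, leaving bits 5..0 set, i.e. exactly slots 0 to 5.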
  val f2_jump_range = Fill(PredictWidth, !f2_ftq_req.ftqOffset.valid) | Fill(PredictWidth, 1.U(1.W)) >> ~f2_ftq_req.ftqOffset.bits
  val f2_ftr_range  = Fill(PredictWidth,  f2_ftq_req.ftqOffset.valid) | Fill(PredictWidth, 1.U(1.W)) >> ~getBasicBlockIdx(f2_ftq_req.nextStartAddr, f2_ftq_req.startAddr)
  val f2_instr_range = f2_jump_range & f2_ftr_range
  val f2_pf_vec = VecInit((0 until PredictWidth).map(i => (!isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_except_pf(0) || isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_doubleLine && f2_except_pf(1))))
  val f2_af_vec = VecInit((0 until PredictWidth).map(i => (!isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_except_af(0) || isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_doubleLine && f2_except_af(1))))

  val f2_paddrs       = VecInit((0 until PortNumber).map(i => fromICache(i).bits.paddr))
  val f2_perf_info    = io.icachePerfInfo

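  // cut() picks PredictWidth + 1 16-bit parcels out of the two concatenated
  // cachelines, starting at the fetch start address. Only the low 3/4 of the
  // doubled line can ever be indexed: a fetch starts inside the first
  // cacheline and extends at most PredictWidth parcels into the second.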
  def cut(cacheline: UInt, cutPtr: Vec[UInt]): Vec[UInt] = {
    require(HasCExtension)
    // if(HasCExtension){
      val partCacheline = cacheline((blockBytes * 8 * 2 * 3) / 4 - 1, 0)
      val result   = Wire(Vec(PredictWidth + 1, UInt(16.W)))
      val dataVec  = cacheline.asTypeOf(Vec(blockBytes * 3 / 4, UInt(16.W))) // 3*blockBytes/4 16-bit parcels
      (0 until PredictWidth + 1).foreach( i =>
        result(i) := dataVec(cutPtr(i)) // the max ptr is 3*blockBytes/4 - 1
      )
      result
    // } else {
    //   val result   = Wire(Vec(PredictWidth, UInt(32.W)) )
    //   val dataVec  = cacheline.asTypeOf(Vec(blockBytes * 2/ 4, UInt(32.W)))
    //   (0 until PredictWidth).foreach( i =>
    //     result(i) := dataVec(cutPtr(i))
    //   )
    //   result
    // }
  }

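  // One doubled cacheline per combination of the two ports' select bits
  // (bit i set means port i delivers sramData, clear means registerData);
  // f2_predecode_ptr = Cat(select(1), select(0)) indexes the matching entry.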
  val f2_data_2_cacheline = Wire(Vec(4, UInt((2 * blockBits).W)))
  f2_data_2_cacheline(0) := Cat(f2_cache_response_reg_data(1),  f2_cache_response_reg_data(0))
  f2_data_2_cacheline(1) := Cat(f2_cache_response_reg_data(1),  f2_cache_response_sram_data(0))
  f2_data_2_cacheline(2) := Cat(f2_cache_response_sram_data(1), f2_cache_response_reg_data(0))
  f2_data_2_cacheline(3) := Cat(f2_cache_response_sram_data(1), f2_cache_response_sram_data(0))

  val f2_cut_data = VecInit(f2_data_2_cacheline.map(data => cut(data, f2_cut_ptr)))

  val f2_predecode_ptr = Wire(UInt(2.W))
  f2_predecode_ptr := Cat(f2_cache_response_select(1), f2_cache_response_select(0))

  /** predecode (RVC instructions are expanded later, in IFU Stage 3) */
  // preDecoderRegIn.data := f2_reg_cut_data
  // preDecoderRegInIn.frontendTrigger := io.frontendTrigger
  // preDecoderRegInIn.csrTriggerEnable := io.csrTriggerEnable
  // preDecoderRegIn.pc  := f2_pc

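  // All four predecoders already hold results for the four data combinations,
  // so the late-arriving select bits only have to steer this Mux1H rather
  // than gate the whole predecode, keeping them off the critical path.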
  val preDecoderOut = Mux1H(UIntToOH(f2_predecode_ptr), preDecoders.map(_.io.out))
  for (i <- 0 until 4) {
    val preDecoderIn  = preDecoders(i).io.in
    preDecoderIn.data := f2_cut_data(i)
    preDecoderIn.frontendTrigger := io.frontendTrigger
    preDecoderIn.csrTriggerEnable := io.csrTriggerEnable
    preDecoderIn.pc  := f2_pc
  }

  //val f2_expd_instr     = preDecoderOut.expInstr
  val f2_instr          = preDecoderOut.instr
  val f2_pd             = preDecoderOut.pd
  val f2_jump_offset    = preDecoderOut.jumpOffset
  val f2_hasHalfValid   = preDecoderOut.hasHalfValid
  val f2_crossPageFault = VecInit((0 until PredictWidth).map(i => isLastInLine(f2_pc(i)) && !f2_except_pf(0) && f2_doubleLine && f2_except_pf(1) && !f2_pd(i).isRVC))

  XSPerfAccumulate("fetch_bubble_icache_not_resp",   f2_valid && !icacheRespAllValid )


  /**
    ******************************************************************************
    * IFU Stage 3
    * - handle MMIO instructions
    *   - send request to the Uncache fetch unit
    *   - every packet includes exactly 1 MMIO instruction
    *   - an MMIO instruction stalls the fetch pipeline until it commits from the ROB
    *   - flush to snpc (send ifu_redirect to Ftq)
    * - Ibuffer enqueue
    * - check prediction results in the Frontend (jalFault/retFault/notCFIFault/invalidTakenFault/targetFault)
    * - handle last half RVI instruction
    ******************************************************************************
    */


  val f3_valid          = RegInit(false.B)
  val f3_ftq_req        = RegEnable(f2_ftq_req,    f2_fire)
  // val f3_situation      = RegEnable(f2_situation,  f2_fire)
  val f3_doubleLine     = RegEnable(f2_doubleLine, f2_fire)
  val f3_fire           = io.toIbuffer.fire()

  f3_ready := f3_fire || !f3_valid

  val f3_cut_data       = RegEnable(next = f2_cut_data(f2_predecode_ptr), enable = f2_fire)

  val f3_except_pf      = RegEnable(f2_except_pf,  f2_fire)
  val f3_except_af      = RegEnable(f2_except_af,  f2_fire)
  val f3_mmio           = RegEnable(f2_mmio,       f2_fire)

  //val f3_expd_instr     = RegEnable(next = f2_expd_instr,  enable = f2_fire)
  val f3_instr          = RegEnable(next = f2_instr, enable = f2_fire)
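  // Expand each (possibly compressed) instruction to its full 32-bit form,
  // one RVCExpander per slot; a 32-bit instruction passes through unchanged.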
  val f3_expd_instr     = VecInit((0 until PredictWidth).map{ i =>
    val expander       = Module(new RVCExpander)
    expander.io.in := f3_instr(i)
    expander.io.out.bits
  })

  val f3_pd             = RegEnable(next = f2_pd,             enable = f2_fire)
  val f3_jump_offset    = RegEnable(next = f2_jump_offset,    enable = f2_fire)
  val f3_af_vec         = RegEnable(next = f2_af_vec,         enable = f2_fire)
  val f3_pf_vec         = RegEnable(next = f2_pf_vec,         enable = f2_fire)
  val f3_pc             = RegEnable(next = f2_pc,             enable = f2_fire)
  val f3_half_snpc      = RegEnable(next = f2_half_snpc,      enable = f2_fire)
  val f3_instr_range    = RegEnable(next = f2_instr_range,    enable = f2_fire)
  val f3_foldpc         = RegEnable(next = f2_foldpc,         enable = f2_fire)
  val f3_crossPageFault = RegEnable(next = f2_crossPageFault, enable = f2_fire)
  val f3_hasHalfValid   = RegEnable(next = f2_hasHalfValid,   enable = f2_fire)
  val f3_except         = VecInit((0 until 2).map{i => f3_except_pf(i) || f3_except_af(i)})
  val f3_has_except     = f3_valid && (f3_except_af.reduce(_||_) || f3_except_pf.reduce(_||_))
  val f3_pAddrs         = RegEnable(f2_paddrs,       f2_fire)
  val f3_resend_vaddr   = RegEnable(f2_resend_vaddr, f2_fire)

  when(f3_valid && !f3_ftq_req.ftqOffset.valid){
    assert(f3_ftq_req.startAddr + 32.U >= f3_ftq_req.nextStartAddr, "More than 32 Bytes fetch is not allowed!")
  }

  /*** MMIO State Machine ***/
  val f3_mmio_data     = Reg(Vec(2, UInt(16.W)))
  val mmio_is_RVC      = RegInit(false.B)
  val mmio_resend_addr = RegInit(0.U(PAddrBits.W))
  val mmio_resend_af   = RegInit(false.B)
  val mmio_resend_pf   = RegInit(false.B)

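  // MMIO fetch FSM:
  //   m_idle -> m_sendReq -> m_waitResp, then, if the response is an RVI
  //   instruction straddling the 8-Byte bus word (paddr(2,1) === 3, i.e. its
  //   low half sits in the word's last 2 Bytes):
  //   -> m_sendTLB -> m_tlbResp -> m_sendPMP -> m_resendReq -> m_waitResendResp
  //   -> m_waitCommit -> m_commited; otherwise straight to m_waitCommit.
  // The pipeline is held until the instruction commits from the ROB, then the
  // FSM returns to m_idle.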
  val m_idle :: m_sendReq :: m_waitResp :: m_sendTLB :: m_tlbResp :: m_sendPMP :: m_resendReq :: m_waitResendResp :: m_waitCommit :: m_commited :: Nil = Enum(10)
  val mmio_state = RegInit(m_idle)

  val f3_req_is_mmio     = f3_mmio && f3_valid
  val mmio_commit = VecInit(io.rob_commits.map{commit => commit.valid && commit.bits.ftqIdx === f3_ftq_req.ftqIdx && commit.bits.ftqOffset === 0.U}).asUInt.orR
  val f3_mmio_req_commit = f3_req_is_mmio && mmio_state === m_commited

  val f3_mmio_to_commit      = f3_req_is_mmio && mmio_state === m_waitCommit
  val f3_mmio_to_commit_next = RegNext(f3_mmio_to_commit)
  val f3_mmio_can_go         = f3_mmio_to_commit && !f3_mmio_to_commit_next

  val fromFtqRedirectReg    = RegNext(fromFtq.redirect)
  val f3_ftq_flush_self     = fromFtqRedirectReg.valid && RedirectLevel.flushItself(fromFtqRedirectReg.bits.level)
  val f3_ftq_flush_by_older = fromFtqRedirectReg.valid && isBefore(fromFtqRedirectReg.bits.ftqIdx, f3_ftq_req.ftqIdx)

  val f3_need_not_flush = f3_req_is_mmio && fromFtqRedirectReg.valid && !f3_ftq_flush_self && !f3_ftq_flush_by_older

  when(f3_flush && !f3_need_not_flush)              {f3_valid := false.B}
  .elsewhen(f2_fire && !f2_flush)                   {f3_valid := true.B }
  .elsewhen(io.toIbuffer.fire() && !f3_req_is_mmio) {f3_valid := false.B}
  .elsewhen(f3_req_is_mmio && f3_mmio_req_commit)   {f3_valid := false.B}

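  // f3_mmio_use_seq_pc: the committed MMIO instruction should redirect fetch
  // to its sequential next PC, unless the FTQ redirect that wakes us up
  // already targets this request (offset 0 of the same ftqIdx).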
  val f3_mmio_use_seq_pc = RegInit(false.B)

  val (redirect_ftqIdx, redirect_ftqOffset) = (fromFtqRedirectReg.bits.ftqIdx, fromFtqRedirectReg.bits.ftqOffset)
  val redirect_mmio_req = fromFtqRedirectReg.valid && redirect_ftqIdx === f3_ftq_req.ftqIdx && redirect_ftqOffset === 0.U

  when(RegNext(f2_fire && !f2_flush) && f3_req_is_mmio)        { f3_mmio_use_seq_pc := true.B  }
  .elsewhen(redirect_mmio_req)                                 { f3_mmio_use_seq_pc := false.B }

  f3_ready := Mux(f3_req_is_mmio, io.toIbuffer.ready && f3_mmio_req_commit || !f3_valid, io.toIbuffer.ready || !f3_valid)

  switch(mmio_state){
    is(m_idle){
      when(f3_req_is_mmio){
        mmio_state := m_sendReq
      }
    }

    is(m_sendReq){
      mmio_state := Mux(toUncache.fire(), m_waitResp, m_sendReq)
    }

    is(m_waitResp){
      when(fromUncache.fire()){
        val isRVC = fromUncache.bits.data(1,0) =/= 3.U
        val needResend = !isRVC && f3_pAddrs(0)(2,1) === 3.U
        mmio_state := Mux(needResend, m_sendTLB, m_waitCommit)

        mmio_is_RVC := isRVC
        f3_mmio_data(0) := fromUncache.bits.data(15,0)
        f3_mmio_data(1) := fromUncache.bits.data(31,16)
      }
    }

    is(m_sendTLB){
      when(io.iTLBInter.req.valid && !io.iTLBInter.resp.bits.miss){
        mmio_state := m_tlbResp
      }
    }

    is(m_tlbResp){
      val tlbExcp = io.iTLBInter.resp.bits.excp(0).pf.instr ||
                    io.iTLBInter.resp.bits.excp(0).af.instr
      mmio_state := Mux(tlbExcp, m_waitCommit, m_sendPMP)
      mmio_resend_addr := io.iTLBInter.resp.bits.paddr(0)
      mmio_resend_af := mmio_resend_af || io.iTLBInter.resp.bits.excp(0).af.instr
      mmio_resend_pf := mmio_resend_pf || io.iTLBInter.resp.bits.excp(0).pf.instr
    }

    is(m_sendPMP){
      val pmpExcpAF = io.pmp.resp.instr || !io.pmp.resp.mmio
      mmio_state := Mux(pmpExcpAF, m_waitCommit, m_resendReq)
      mmio_resend_af := pmpExcpAF
    }

    is(m_resendReq){
      mmio_state := Mux(toUncache.fire(), m_waitResendResp, m_resendReq)
    }

    is(m_waitResendResp){
      when(fromUncache.fire()){
        mmio_state := m_waitCommit
        f3_mmio_data(1) := fromUncache.bits.data(15,0)
      }
    }

    is(m_waitCommit){
      when(mmio_commit){
        mmio_state := m_commited
      }
    }

    //normal mmio instruction
    is(m_commited){
      mmio_state := m_idle
      mmio_is_RVC := false.B
      mmio_resend_addr := 0.U
    }
  }


  //exception or flush by older branch prediction
  when(f3_ftq_flush_self || f3_ftq_flush_by_older)  {
    mmio_state := m_idle
    mmio_is_RVC := false.B
    mmio_resend_addr := 0.U
    mmio_resend_af := false.B
    mmio_resend_pf := false.B // clear the OR-accumulated pf flag together with af
    f3_mmio_data.map(_ := 0.U)
  }


  toUncache.valid     := ((mmio_state === m_sendReq) || (mmio_state === m_resendReq)) && f3_req_is_mmio
  toUncache.bits.addr := Mux((mmio_state === m_resendReq), mmio_resend_addr, f3_pAddrs(0))
  fromUncache.ready   := true.B

  io.iTLBInter.req.valid         := (mmio_state === m_sendTLB) && f3_req_is_mmio
  io.iTLBInter.req.bits.size     := 3.U
  io.iTLBInter.req.bits.vaddr    := f3_resend_vaddr
  io.iTLBInter.req.bits.debug.pc := f3_resend_vaddr

  io.iTLBInter.req.bits.kill                := false.B // IFU uses the ITLB only for MMIO here, no sync needed, so keep kill low
  io.iTLBInter.req.bits.cmd                 := TlbCmd.exec
  io.iTLBInter.req.bits.debug.robIdx        := DontCare
  io.iTLBInter.req.bits.debug.isFirstIssue  := DontCare

  io.pmp.req.valid := (mmio_state === m_sendPMP) && f3_req_is_mmio
  io.pmp.req.bits.addr  := mmio_resend_addr
  io.pmp.req.bits.size  := 3.U
  io.pmp.req.bits.cmd   := TlbCmd.exec

  val f3_lastHalf = RegInit(0.U.asTypeOf(new LastHalfInfo))

  val f3_predecode_range = VecInit(preDecoderOut.pd.map(inst => inst.valid)).asUInt
  val f3_mmio_range      = VecInit((0 until PredictWidth).map(i => if (i == 0) true.B else false.B))
  val f3_instr_valid     = Wire(Vec(PredictWidth, Bool()))

  /*** prediction result check ***/
  checkerIn.ftqOffset   := f3_ftq_req.ftqOffset
  checkerIn.jumpOffset  := f3_jump_offset
  checkerIn.target      := f3_ftq_req.nextStartAddr
  checkerIn.instrRange  := f3_instr_range.asTypeOf(Vec(PredictWidth, Bool()))
  checkerIn.instrValid  := f3_instr_valid.asTypeOf(Vec(PredictWidth, Bool()))
  checkerIn.pds         := f3_pd
  checkerIn.pc          := f3_pc

  /*** handle half RVI in the last 2 Bytes ***/

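  // A "last half" means the final 2 Bytes of this fetch block hold the first
  // half of an RVI instruction that is neither predicted taken nor MMIO:
  // record its continuation PC (nextStartAddr) so the next packet can stitch
  // the instruction together (f3_hasHalfValid supplies the adjusted valids).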
  def hasLastHalf(idx: UInt) = {
    //!f3_pd(idx).isRVC && checkerOutStage1.fixedRange(idx) && f3_instr_valid(idx) && !checkerOutStage1.fixedTaken(idx) && !checkerOutStage2.fixedMissPred(idx) && ! f3_req_is_mmio
    !f3_pd(idx).isRVC && checkerOutStage1.fixedRange(idx) && f3_instr_valid(idx) && !checkerOutStage1.fixedTaken(idx) && !f3_req_is_mmio
  }

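  // Index of the last set bit in fixedRange: priority-encode the reversed
  // mask (giving PredictWidth-1-last), then bitwise-NOT maps it back, since
  // ~x = PredictWidth-1-x for a log2Ceil(PredictWidth)-bit index.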
  val f3_last_validIdx       = ~ParallelPriorityEncoder(checkerOutStage1.fixedRange.reverse)

  val f3_hasLastHalf         = hasLastHalf((PredictWidth - 1).U)
  val f3_false_lastHalf      = hasLastHalf(f3_last_validIdx)
  val f3_false_snpc          = f3_half_snpc(f3_last_validIdx)

  val f3_lastHalf_mask    = VecInit((0 until PredictWidth).map( i => if (i == 0) false.B else true.B )).asUInt()
  val f3_lastHalf_disable = RegInit(false.B)

  when(f3_flush || (f3_fire && f3_lastHalf_disable)){
    f3_lastHalf_disable := false.B
  }

  when (f3_flush) {
    f3_lastHalf.valid := false.B
  }.elsewhen (f3_fire) {
    f3_lastHalf.valid := f3_hasLastHalf && !f3_lastHalf_disable
    f3_lastHalf.middlePC := f3_ftq_req.nextStartAddr
  }

  f3_instr_valid := Mux(f3_lastHalf.valid, f3_hasHalfValid, VecInit(f3_pd.map(inst => inst.valid)))

  /*** frontend Trigger ***/
  frontendTrigger.io.pds  := f3_pd
  frontendTrigger.io.pc   := f3_pc
  frontendTrigger.io.data := f3_cut_data

  frontendTrigger.io.frontendTrigger  := io.frontendTrigger
  frontendTrigger.io.csrTriggerEnable := io.csrTriggerEnable

  val f3_triggered = frontendTrigger.io.triggered

  /*** send to Ibuffer ***/

  io.toIbuffer.valid            := f3_valid && (!f3_req_is_mmio || f3_mmio_can_go) && !f3_flush
  io.toIbuffer.bits.instrs      := f3_expd_instr
  io.toIbuffer.bits.valid       := f3_instr_valid.asUInt
  io.toIbuffer.bits.enqEnable   := checkerOutStage1.fixedRange.asUInt & f3_instr_valid.asUInt
  io.toIbuffer.bits.pd          := f3_pd
  io.toIbuffer.bits.ftqPtr      := f3_ftq_req.ftqIdx
  io.toIbuffer.bits.pc          := f3_pc
  io.toIbuffer.bits.ftqOffset.zipWithIndex.map{case(a, i) => a.bits := i.U; a.valid := checkerOutStage1.fixedTaken(i) && !f3_req_is_mmio}
  io.toIbuffer.bits.foldpc      := f3_foldpc
  io.toIbuffer.bits.ipf         := VecInit(f3_pf_vec.zip(f3_crossPageFault).map{case (pf, crossPF) => pf || crossPF})
  io.toIbuffer.bits.acf         := f3_af_vec
  io.toIbuffer.bits.crossPageIPFFix := f3_crossPageFault
  io.toIbuffer.bits.triggered   := f3_triggered

  when(f3_lastHalf.valid){
    io.toIbuffer.bits.enqEnable := checkerOutStage1.fixedRange.asUInt & f3_instr_valid.asUInt & f3_lastHalf_mask
    io.toIbuffer.bits.valid     := f3_lastHalf_mask & f3_instr_valid.asUInt
  }

  //Write back to Ftq
  val f3_cache_fetch = f3_valid && !(f2_fire && !f2_flush)
  val finishFetchMaskReg = RegNext(f3_cache_fetch)

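  // After an MMIO instruction commits, write a one-instruction predecode
  // result back to the FTQ with misOffset = 0 so the FTQ redirects fetch to
  // the address right after the MMIO instruction (startAddr + 2 or + 4).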
  val mmioFlushWb = Wire(Valid(new PredecodeWritebackBundle))
  val f3_mmio_missOffset = Wire(ValidUndirectioned(UInt(log2Ceil(PredictWidth).W)))
  f3_mmio_missOffset.valid := f3_req_is_mmio
  f3_mmio_missOffset.bits  := 0.U

  mmioFlushWb.valid           := (f3_req_is_mmio && mmio_state === m_waitCommit && RegNext(fromUncache.fire()) && f3_mmio_use_seq_pc)
  mmioFlushWb.bits.pc         := f3_pc
  mmioFlushWb.bits.pd         := f3_pd
  mmioFlushWb.bits.pd.zipWithIndex.map{case(instr,i) => instr.valid := f3_mmio_range(i)}
  mmioFlushWb.bits.ftqIdx     := f3_ftq_req.ftqIdx
  mmioFlushWb.bits.ftqOffset  := f3_ftq_req.ftqOffset.bits
  mmioFlushWb.bits.misOffset  := f3_mmio_missOffset
  mmioFlushWb.bits.cfiOffset  := DontCare
  mmioFlushWb.bits.target     := Mux(mmio_is_RVC, f3_ftq_req.startAddr + 2.U, f3_ftq_req.startAddr + 4.U)
  mmioFlushWb.bits.jalTarget  := DontCare
  mmioFlushWb.bits.instrRange := f3_mmio_range

  /** external predecode for MMIO instructions */
  when(f3_req_is_mmio){
    val inst = Cat(f3_mmio_data(1), f3_mmio_data(0))
    val currentIsRVC = isRVC(inst)

    val brType::isCall::isRet::Nil = brInfo(inst)
    val jalOffset = jal_offset(inst, currentIsRVC)
    val brOffset  = br_offset(inst, currentIsRVC)

    io.toIbuffer.bits.instrs(0) := new RVCDecoder(inst, XLEN).decode.bits

    io.toIbuffer.bits.pd(0).valid  := true.B
    io.toIbuffer.bits.pd(0).isRVC  := currentIsRVC
    io.toIbuffer.bits.pd(0).brType := brType
    io.toIbuffer.bits.pd(0).isCall := isCall
    io.toIbuffer.bits.pd(0).isRet  := isRet

    io.toIbuffer.bits.acf(0) := mmio_resend_af
    io.toIbuffer.bits.ipf(0) := mmio_resend_pf
    io.toIbuffer.bits.crossPageIPFFix(0) := mmio_resend_pf

    io.toIbuffer.bits.enqEnable := f3_mmio_range.asUInt

    mmioFlushWb.bits.pd(0).valid  := true.B
    mmioFlushWb.bits.pd(0).isRVC  := currentIsRVC
    mmioFlushWb.bits.pd(0).brType := brType
    mmioFlushWb.bits.pd(0).isCall := isCall
    mmioFlushWb.bits.pd(0).isRet  := isRet
  }

  mmio_redirect := (f3_req_is_mmio && mmio_state === m_waitCommit && RegNext(fromUncache.fire()) && f3_mmio_use_seq_pc)

  XSPerfAccumulate("fetch_bubble_ibuffer_not_ready",   io.toIbuffer.valid && !io.toIbuffer.ready )


  /**
    ******************************************************************************
    * IFU Write Back Stage
    * - write back predecode information to the Ftq to update its entry
    * - redirect if a fault prediction is found
    * - redirect on a false-hit last half (the last PC is not start + 32 Bytes but the middle of a notCFI RVI instruction)
    ******************************************************************************
    */


  val wb_valid          = RegNext(RegNext(f2_fire && !f2_flush) && !f3_req_is_mmio && !f3_flush)
  val wb_ftq_req        = RegNext(f3_ftq_req)

  val wb_check_result_stage1 = RegNext(checkerOutStage1)
  val wb_check_result_stage2 = checkerOutStage2
  val wb_instr_range    = RegNext(io.toIbuffer.bits.enqEnable)
  val wb_pc             = RegNext(f3_pc)
  val wb_pd             = RegNext(f3_pd)
  val wb_instr_valid    = RegNext(f3_instr_valid)

  /* false hit lastHalf */
  val wb_lastIdx        = RegNext(f3_last_validIdx)
  val wb_false_lastHalf = RegNext(f3_false_lastHalf) && wb_lastIdx =/= (PredictWidth - 1).U
  val wb_false_target   = RegNext(f3_false_snpc)

  val wb_half_flush  = wb_false_lastHalf
  val wb_half_target = wb_false_target

  /* false oversize */
  val lastIsRVC = wb_instr_range.asTypeOf(Vec(PredictWidth, Bool())).last && wb_pd.last.isRVC
  val lastIsRVI = wb_instr_range.asTypeOf(Vec(PredictWidth, Bool()))(PredictWidth - 2) && !wb_pd(PredictWidth - 2).isRVC
  val lastTaken = wb_check_result_stage1.fixedTaken.last

  f3_wb_not_flush := wb_ftq_req.ftqIdx === f3_ftq_req.ftqIdx && f3_valid && wb_valid

  /** If a mispredicted request with a last half enters the wb stage while f3
    * stalls this cycle, set a flag to tell f3 that the last-half flag need
    * not be set.
    */
  //f3_fire is after wb_valid
  when(wb_valid && RegNext(f3_hasLastHalf, init = false.B)
        && wb_check_result_stage2.fixedMissPred(PredictWidth - 1) && !f3_fire && !RegNext(f3_fire, init = false.B)
      ){
    f3_lastHalf_disable := true.B
  }

  //wb_valid and f3_fire are in the same cycle
  when(wb_valid && RegNext(f3_hasLastHalf, init = false.B)
        && wb_check_result_stage2.fixedMissPred(PredictWidth - 1) && f3_fire
      ){
    f3_lastHalf.valid := false.B
  }

  val checkFlushWb = Wire(Valid(new PredecodeWritebackBundle))
  checkFlushWb.valid                := wb_valid
  checkFlushWb.bits.pc              := wb_pc
  checkFlushWb.bits.pd              := wb_pd
  checkFlushWb.bits.pd.zipWithIndex.map{case(instr,i) => instr.valid := wb_instr_valid(i)}
  checkFlushWb.bits.ftqIdx          := wb_ftq_req.ftqIdx
  checkFlushWb.bits.ftqOffset       := wb_ftq_req.ftqOffset.bits
  checkFlushWb.bits.misOffset.valid := ParallelOR(wb_check_result_stage2.fixedMissPred) || wb_half_flush
  checkFlushWb.bits.misOffset.bits  := Mux(wb_half_flush, wb_lastIdx, ParallelPriorityEncoder(wb_check_result_stage2.fixedMissPred))
  checkFlushWb.bits.cfiOffset.valid := ParallelOR(wb_check_result_stage1.fixedTaken)
  checkFlushWb.bits.cfiOffset.bits  := ParallelPriorityEncoder(wb_check_result_stage1.fixedTaken)
  checkFlushWb.bits.target          := Mux(wb_half_flush, wb_half_target, wb_check_result_stage2.fixedTarget(ParallelPriorityEncoder(wb_check_result_stage2.fixedMissPred)))
  checkFlushWb.bits.jalTarget       := wb_check_result_stage2.fixedTarget(ParallelPriorityEncoder(VecInit(wb_pd.zip(wb_instr_valid).map{case (pd, v) => v && pd.isJal})))
  checkFlushWb.bits.instrRange      := wb_instr_range.asTypeOf(Vec(PredictWidth, Bool()))

  toFtq.pdWb := Mux(wb_valid, checkFlushWb, mmioFlushWb)

  wb_redirect := checkFlushWb.bits.misOffset.valid && wb_valid

  /* write back flush type */
  val checkFaultType    = wb_check_result_stage2.faultType
  val checkJalFault     = wb_valid && checkFaultType.map(_.isjalFault).reduce(_||_)
  val checkRetFault     = wb_valid && checkFaultType.map(_.isRetFault).reduce(_||_)
  val checkTargetFault  = wb_valid && checkFaultType.map(_.istargetFault).reduce(_||_)
  val checkNotCFIFault  = wb_valid && checkFaultType.map(_.notCFIFault).reduce(_||_)
  val checkInvalidTaken = wb_valid && checkFaultType.map(_.invalidTakenFault).reduce(_||_)

  XSPerfAccumulate("predecode_flush_jalFault",          checkJalFault )
  XSPerfAccumulate("predecode_flush_retFault",          checkRetFault )
  XSPerfAccumulate("predecode_flush_targetFault",       checkTargetFault )
  XSPerfAccumulate("predecode_flush_notCFIFault",       checkNotCFIFault )
  XSPerfAccumulate("predecode_flush_invalidTakenFault", checkInvalidTaken )

  when(checkRetFault){
    XSDebug("startAddr:%x  nextstartAddr:%x  taken:%d    takenIdx:%d\n",
        wb_ftq_req.startAddr, wb_ftq_req.nextStartAddr, wb_ftq_req.ftqOffset.valid, wb_ftq_req.ftqOffset.bits)
  }

  /** performance counters */
  val f3_perf_info = RegEnable(f2_perf_info, f2_fire)
  val f3_req_0 = io.toIbuffer.fire()
  val f3_req_1 = io.toIbuffer.fire() && f3_doubleLine
  val f3_hit_0 = io.toIbuffer.fire() && f3_perf_info.bank_hit(0)
  val f3_hit_1 = io.toIbuffer.fire() && f3_doubleLine && f3_perf_info.bank_hit(1)
  val f3_hit   = f3_perf_info.hit
  val perfEvents = Seq(
    ("frontendFlush                ", wb_redirect                                ),
    ("ifu_req                      ", io.toIbuffer.fire()                        ),
    ("ifu_miss                     ", io.toIbuffer.fire() && !f3_perf_info.hit   ),
    ("ifu_req_cacheline_0          ", f3_req_0                                   ),
    ("ifu_req_cacheline_1          ", f3_req_1                                   ),
    ("ifu_req_cacheline_0_hit      ", f3_hit_0                                   ),
    ("ifu_req_cacheline_1_hit      ", f3_hit_1                                   ),
    ("only_0_hit                   ", f3_perf_info.only_0_hit       && io.toIbuffer.fire() ),
    ("only_0_miss                  ", f3_perf_info.only_0_miss      && io.toIbuffer.fire() ),
    ("hit_0_hit_1                  ", f3_perf_info.hit_0_hit_1      && io.toIbuffer.fire() ),
    ("hit_0_miss_1                 ", f3_perf_info.hit_0_miss_1     && io.toIbuffer.fire() ),
    ("miss_0_hit_1                 ", f3_perf_info.miss_0_hit_1     && io.toIbuffer.fire() ),
    ("miss_0_miss_1                ", f3_perf_info.miss_0_miss_1    && io.toIbuffer.fire() ),
  )
  generatePerfEvent()

  XSPerfAccumulate("ifu_req",   io.toIbuffer.fire() )
  XSPerfAccumulate("ifu_miss",  io.toIbuffer.fire() && !f3_hit )
  XSPerfAccumulate("ifu_req_cacheline_0", f3_req_0  )
  XSPerfAccumulate("ifu_req_cacheline_1", f3_req_1  )
  XSPerfAccumulate("ifu_req_cacheline_0_hit",   f3_hit_0 )
  XSPerfAccumulate("ifu_req_cacheline_1_hit",   f3_hit_1 )
  XSPerfAccumulate("frontendFlush",  wb_redirect )
  XSPerfAccumulate("only_0_hit",      f3_perf_info.only_0_hit   && io.toIbuffer.fire()  )
  XSPerfAccumulate("only_0_miss",     f3_perf_info.only_0_miss  && io.toIbuffer.fire()  )
  XSPerfAccumulate("hit_0_hit_1",     f3_perf_info.hit_0_hit_1  && io.toIbuffer.fire()  )
  XSPerfAccumulate("hit_0_miss_1",    f3_perf_info.hit_0_miss_1  && io.toIbuffer.fire()  )
  XSPerfAccumulate("miss_0_hit_1",    f3_perf_info.miss_0_hit_1   && io.toIbuffer.fire() )
  XSPerfAccumulate("miss_0_miss_1",   f3_perf_info.miss_0_miss_1 && io.toIbuffer.fire() )
  XSPerfAccumulate("hit_0_except_1",   f3_perf_info.hit_0_except_1 && io.toIbuffer.fire() )
  XSPerfAccumulate("miss_0_except_1",   f3_perf_info.miss_0_except_1 && io.toIbuffer.fire() )
  XSPerfAccumulate("except_0",   f3_perf_info.except_0 && io.toIbuffer.fire() )
}