// /XiangShan/src/main/scala/xiangshan/frontend/IFU.scala (revision 7295133529ec07672490a4dcfc4832daadb8bb4b)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.mmu._
import chisel3.experimental.verification
import utils._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}

trait HasInstrMMIOConst extends HasXSParameter with HasIFUConst {
  def mmioBusWidth = 64
  def mmioBusBytes = mmioBusWidth / 8
  def maxInstrLen = 32
}

trait HasIFUConst extends HasXSParameter {
  def align(pc: UInt, bytes: Int): UInt = Cat(pc(VAddrBits-1, log2Ceil(bytes)), 0.U(log2Ceil(bytes).W))
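  // e.g. (illustrative): with 64-byte cachelines, align(0x80001234.U, 64) yields
  // 0x80001200.U, the start address of the cacheline containing pc.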
  // def groupAligned(pc: UInt)  = align(pc, groupBytes)
  // def packetAligned(pc: UInt) = align(pc, packetBytes)
}

class IfuToFtqIO(implicit p: Parameters) extends XSBundle {
  val pdWb = Valid(new PredecodeWritebackBundle)
}

class FtqInterface(implicit p: Parameters) extends XSBundle {
  val fromFtq = Flipped(new FtqToIfuIO)
  val toFtq   = new IfuToFtqIO
}

class UncacheInterface(implicit p: Parameters) extends XSBundle {
  val fromUncache = Flipped(DecoupledIO(new InsUncacheResp))
  val toUncache   = DecoupledIO(new InsUncacheReq)
}

class ICacheInterface(implicit p: Parameters) extends XSBundle {
  val toIMeta       = Decoupled(new ICacheReadBundle)
  val toIData       = Decoupled(new ICacheReadBundle)
  val toMissQueue   = Vec(2, Decoupled(new ICacheMissReq))
  val fromIMeta     = Input(new ICacheMetaRespBundle)
  val fromIData     = Input(new ICacheDataRespBundle)
  val fromMissQueue = Vec(2, Flipped(Decoupled(new ICacheMissResp)))
}

class NewIFUIO(implicit p: Parameters) extends XSBundle {
  val ftqInter        = new FtqInterface
  val icacheInter     = new ICacheInterface
  val toIbuffer       = Decoupled(new FetchToIBuffer)
  val iTLBInter       = Vec(2, new BlockTlbRequestIO)
  val uncacheInter    = new UncacheInterface
  val pmp             = Vec(2, new Bundle {
    val req = Valid(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  })
  val frontendTrigger = Flipped(new FrontendTdataDistributeIO)
  val csrTriggerEnable = Input(Vec(4, Bool()))
  val rob_commits = Flipped(Vec(CommitWidth, Valid(new RobCommitInfo)))
}

// Records the case where fallThruAddr falls in the middle of a 32-bit RVI instruction:
// the upper half of that instruction spills into the next fetch block, whose start
// address is kept in middlePC.
class LastHalfInfo(implicit p: Parameters) extends XSBundle {
  val valid = Bool()
  val middlePC = UInt(VAddrBits.W)
  def matchThisBlock(startAddr: UInt) = valid && middlePC === startAddr
}

class IfuToPreDecode(implicit p: Parameters) extends XSBundle {
  val data          = if(HasCExtension) Vec(PredictWidth + 1, UInt(16.W)) else Vec(PredictWidth, UInt(32.W))
  val startAddr     = UInt(VAddrBits.W)
  val fallThruAddr  = UInt(VAddrBits.W)
  val fallThruError = Bool()
  val isDoubleLine  = Bool()
  val ftqOffset     = Valid(UInt(log2Ceil(PredictWidth).W))
  val target        = UInt(VAddrBits.W)
  val pageFault     = Vec(2, Bool())
  val accessFault   = Vec(2, Bool())
  val instValid     = Bool()
  val lastHalfMatch = Bool()
  val oversize      = Bool()
  val mmio          = Bool()
  val frontendTrigger = new FrontendTdataDistributeIO
  val csrTriggerEnable = Vec(4, Bool())
}

class NewIFU(implicit p: Parameters) extends XSModule with HasICacheParameters
{
  println(s"icache ways: ${nWays} sets: ${nSets}")
  val io = IO(new NewIFUIO)
  val (toFtq, fromFtq)    = (io.ftqInter.toFtq, io.ftqInter.fromFtq)
  val (toMeta, toData, meta_resp, data_resp) = (io.icacheInter.toIMeta, io.icacheInter.toIData, io.icacheInter.fromIMeta, io.icacheInter.fromIData)
  val (toMissQueue, fromMissQueue) = (io.icacheInter.toMissQueue, io.icacheInter.fromMissQueue)
  val (toUncache, fromUncache) = (io.uncacheInter.toUncache, io.uncacheInter.fromUncache)
  val (toITLB, fromITLB) = (VecInit(io.iTLBInter.map(_.req)), VecInit(io.iTLBInter.map(_.resp)))
  val fromPMP = io.pmp.map(_.resp)

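  // A fetch request crosses a cacheline boundary when its start and end addresses fall
  // in different cachelines (bit blockOffBits of the two addresses differs).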
  def isCrossLineReq(start: UInt, end: UInt): Bool = start(blockOffBits) ^ end(blockOffBits)

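  // True when fallThruAddr points at the very beginning of a cacheline, i.e. the fetch
  // block ends exactly at a cacheline boundary.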
  def isLastInCacheline(fallThruAddr: UInt): Bool = fallThruAddr(blockOffBits - 1, 1) === 0.U

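  // Pass data through while valid is asserted; afterwards keep returning the last value
  // captured when valid was high.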
  def ResultHoldBypass[T <: Data](data: T, valid: Bool): T = {
    Mux(valid, data, RegEnable(data, valid))
  }

  //---------------------------------------------
  //  Fetch Stage 1 (f0):
  //  * Send the fetch request to the ICache meta/data arrays
  //  * Check whether a double-line (cross-cacheline) fetch is needed
  //---------------------------------------------

  val f0_valid                             = fromFtq.req.valid
  val f0_ftq_req                           = fromFtq.req.bits
  val f0_situation                         = VecInit(Seq(isCrossLineReq(f0_ftq_req.startAddr, f0_ftq_req.fallThruAddr), isLastInCacheline(f0_ftq_req.fallThruAddr)))
  val f0_doubleLine                        = f0_situation(0) || f0_situation(1)
  val f0_vSetIdx                           = VecInit(get_idx(f0_ftq_req.startAddr), get_idx(f0_ftq_req.fallThruAddr))
  val f0_fire                              = fromFtq.req.fire()

  val f0_flush, f1_flush, f2_flush, f3_flush = WireInit(false.B)
  val from_bpu_f0_flush, from_bpu_f1_flush, from_bpu_f2_flush, from_bpu_f3_flush = WireInit(false.B)

  from_bpu_f0_flush := fromFtq.flushFromBpu.shouldFlushByStage2(f0_ftq_req.ftqIdx) ||
                       fromFtq.flushFromBpu.shouldFlushByStage3(f0_ftq_req.ftqIdx)

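  // Flushes cascade backwards: a flush of a later stage also flushes every earlier one
  // (backend redirect -> f3, IFU predecode redirect -> f2, BPU s2/s3 flush -> f1/f0).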
  val f3_redirect = WireInit(false.B)
  f3_flush := fromFtq.redirect.valid
  f2_flush := f3_flush || f3_redirect
  f1_flush := f2_flush || from_bpu_f1_flush
  f0_flush := f1_flush || from_bpu_f0_flush

  val f1_ready, f2_ready, f3_ready         = WireInit(false.B)

  // fetch: send the request to the ICache meta and data arrays simultaneously
  val fetch_req = List(toMeta, toData)
  for(i <- 0 until 2) {
    fetch_req(i).valid := f0_fire
    fetch_req(i).bits.isDoubleLine := f0_doubleLine
    fetch_req(i).bits.vSetIdx := f0_vSetIdx
  }

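  // Accept a new FTQ request only when both SRAM ports and the next stage are ready;
  // the GTimer() > 500.U term additionally holds off fetch for the first ~500 cycles
  // after reset.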
  fromFtq.req.ready := fetch_req(0).ready && fetch_req(1).ready && f1_ready && GTimer() > 500.U

  XSPerfAccumulate("ifu_bubble_ftq_not_valid",   !f0_valid )
  XSPerfAccumulate("ifu_bubble_pipe_stall",    f0_valid && fetch_req(0).ready && fetch_req(1).ready && !f1_ready )
  XSPerfAccumulate("ifu_bubble_sram_0_busy",   f0_valid && !fetch_req(0).ready  )
  XSPerfAccumulate("ifu_bubble_sram_1_busy",   f0_valid && !fetch_req(1).ready  )

  //---------------------------------------------
  //  Fetch Stage 2 (f1):
  //  * Send req to the ITLB and get the TLB response (paddr)
  //  * ICache response (meta and data)
  //  * Hit check (generate hit signal and hit vector)
  //  * Get victim way
  //---------------------------------------------

  //TODO: handle fetch exceptions

  val tlbRespAllValid = WireInit(false.B)

  val f1_valid      = RegInit(false.B)
  val f1_ftq_req    = RegEnable(next = f0_ftq_req,    enable = f0_fire)
  val f1_situation  = RegEnable(next = f0_situation,  enable = f0_fire)
  val f1_doubleLine = RegEnable(next = f0_doubleLine, enable = f0_fire)
  val f1_vSetIdx    = RegEnable(next = f0_vSetIdx,    enable = f0_fire)
  val f1_fire       = f1_valid && tlbRespAllValid && f2_ready

  f1_ready := f2_ready && tlbRespAllValid || !f1_valid

  from_bpu_f1_flush := fromFtq.flushFromBpu.shouldFlushByStage3(f1_ftq_req.ftqIdx)

  val preDecoder      = Module(new PreDecode)
  val (preDecoderIn, preDecoderOut)   = (preDecoder.io.in, preDecoder.io.out)

  // flush generation and write-back to Ftq
  val predecodeOutValid = WireInit(false.B)

  when(f1_flush)                  {f1_valid  := false.B}
  .elsewhen(f0_fire && !f0_flush) {f1_valid  := true.B}
  .elsewhen(f1_fire)              {f1_valid  := false.B}

  toITLB(0).valid         := f1_valid
  toITLB(0).bits.size     := 3.U // TODO: fix the size
  toITLB(0).bits.vaddr    := f1_ftq_req.startAddr
  toITLB(0).bits.debug.pc := f1_ftq_req.startAddr

  toITLB(1).valid         := f1_valid && f1_doubleLine
  toITLB(1).bits.size     := 3.U // TODO: fix the size
  toITLB(1).bits.vaddr    := f1_ftq_req.fallThruAddr
  toITLB(1).bits.debug.pc := f1_ftq_req.fallThruAddr

  toITLB.foreach{port =>
    port.bits.cmd                 := TlbCmd.exec
    port.bits.robIdx              := DontCare
    port.bits.debug.isFirstIssue  := DontCare
  }

  fromITLB.foreach(_.ready := true.B)

  val (tlbRespValid, tlbRespPAddr) = (fromITLB.map(_.valid), VecInit(fromITLB.map(_.bits.paddr)))
  val tlbRespMiss                  = fromITLB.map(port => port.bits.miss && port.valid)
  val (tlbExcpPF,    tlbExcpAF)    = (fromITLB.map(port => port.bits.excp.pf.instr && port.valid),
    fromITLB.map(port => port.bits.excp.af.instr && port.valid)) //TODO: Temp treat mmio req as access fault

  tlbRespAllValid := tlbRespValid(0)  && (tlbRespValid(1) || !f1_doubleLine)

  val f1_pAddrs             = tlbRespPAddr
  val f1_pTags              = VecInit(f1_pAddrs.map(get_phy_tag(_)))

  val f1_tags               = ResultHoldBypass(data = meta_resp.tags, valid = RegNext(toMeta.fire()))
  val f1_cacheline_valid    = ResultHoldBypass(data = meta_resp.valid, valid = RegNext(toMeta.fire()))
  val f1_datas              = ResultHoldBypass(data = data_resp.datas, valid = RegNext(toData.fire()))

  val bank0_hit_vec         = VecInit(f1_tags(0).zipWithIndex.map{ case(way_tag, i) => f1_cacheline_valid(0)(i) && way_tag === f1_pTags(0) })
  val bank1_hit_vec         = VecInit(f1_tags(1).zipWithIndex.map{ case(way_tag, i) => f1_cacheline_valid(1)(i) && way_tag === f1_pTags(1) })
  val (bank0_hit, bank1_hit) = (ParallelOR(bank0_hit_vec) && !tlbExcpPF(0) && !tlbExcpAF(0), ParallelOR(bank1_hit_vec) && !tlbExcpPF(1) && !tlbExcpAF(1))
  val f1_hit                = (bank0_hit && bank1_hit && f1_valid && f1_doubleLine) || (f1_valid && !f1_doubleLine && bank0_hit)
  val f1_bank_hit_vec       = VecInit(Seq(bank0_hit_vec, bank1_hit_vec))
  val f1_bank_hit           = VecInit(Seq(bank0_hit, bank1_hit))


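  // One random replacement policy per ICache bank: the victim way is chosen here in f1,
  // and the replacers are trained further below from f1 hits and f2 misses
  // (touch_sets / touch_ways).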
  val replacers       = Seq.fill(2)(ReplacementPolicy.fromString(Some("random"), nWays, nSets/2))
  val f1_victim_masks = VecInit(replacers.zipWithIndex.map{case (replacer, i) => UIntToOH(replacer.way(f1_vSetIdx(i)))})

  val touch_sets = Seq.fill(2)(Wire(Vec(2, UInt(log2Ceil(nSets/2).W))))
  val touch_ways = Seq.fill(2)(Wire(Vec(2, Valid(UInt(log2Ceil(nWays).W)))))

  ((replacers zip touch_sets) zip touch_ways).foreach{case ((r, s), w) => r.access(s, w)}

  val f1_hit_data      =  VecInit(f1_datas.zipWithIndex.map { case(bank, i) =>
    val bank_hit_data = Mux1H(f1_bank_hit_vec(i).asUInt, bank)
    bank_hit_data
  })

  (0 until nWays).foreach{ w =>
    XSPerfAccumulate("line_0_hit_way_" + Integer.toString(w, 10),  f1_fire && f1_bank_hit(0) && OHToUInt(f1_bank_hit_vec(0))  === w.U)
  }

  (0 until nWays).foreach{ w =>
    XSPerfAccumulate("line_0_victim_way_" + Integer.toString(w, 10),  f1_fire && !f1_bank_hit(0) && OHToUInt(f1_victim_masks(0))  === w.U)
  }

  (0 until nWays).foreach{ w =>
    XSPerfAccumulate("line_1_hit_way_" + Integer.toString(w, 10),  f1_fire && f1_doubleLine && f1_bank_hit(1) && OHToUInt(f1_bank_hit_vec(1))  === w.U)
  }

  (0 until nWays).foreach{ w =>
    XSPerfAccumulate("line_1_victim_way_" + Integer.toString(w, 10),  f1_fire && f1_doubleLine && !f1_bank_hit(1) && OHToUInt(f1_victim_masks(1))  === w.U)
  }

  XSPerfAccumulate("ifu_bubble_f1_tlb_miss",    f1_valid && !tlbRespAllValid )

  //---------------------------------------------
  //  Fetch Stage 3 (f2):
  //  * Get the hit data from the previous stage (f1_hit_data); on a miss, wait for the
  //    miss queue response (via the wait_state machine below)  THIS IS TOO UGLY!!!
  //  * PMP check and MMIO detection
  //  * Cut the cacheline(s) into instruction parcels (PreDecode runs in the next stage)
  //---------------------------------------------
  val f2_fetchFinish = Wire(Bool())

  val f2_valid        = RegInit(false.B)
  val f2_ftq_req      = RegEnable(next = f1_ftq_req,    enable = f1_fire)
  val f2_situation    = RegEnable(next = f1_situation,  enable = f1_fire)
  val f2_doubleLine   = RegEnable(next = f1_doubleLine, enable = f1_fire)
  val f2_fire         = f2_valid && f2_fetchFinish && f3_ready

  when(f2_flush)                  {f2_valid := false.B}
  .elsewhen(f1_fire && !f1_flush) {f2_valid := true.B }
  .elsewhen(f2_fire)              {f2_valid := false.B}

  val pmpExcpAF = fromPMP.map(port => port.instr)
  val mmio      = fromPMP.map(port => port.mmio) // TODO: handle it


  val f2_pAddrs   = RegEnable(next = f1_pAddrs, enable = f1_fire)
  val f2_hit      = RegEnable(next = f1_hit   , enable = f1_fire)
  val f2_bank_hit = RegEnable(next = f1_bank_hit, enable = f1_fire)
  val f2_miss     = f2_valid && !f2_hit
  val (f2_vSetIdx, f2_pTags) = (RegEnable(next = f1_vSetIdx, enable = f1_fire), RegEnable(next = f1_pTags, enable = f1_fire))
  val f2_waymask  = RegEnable(next = f1_victim_masks, enable = f1_fire)
  // exception information
  val f2_except_pf = RegEnable(next = VecInit(tlbExcpPF), enable = f1_fire)
  val f2_except_af = VecInit(RegEnable(next = VecInit(tlbExcpAF), enable = f1_fire).zip(pmpExcpAF).map(a => a._1 || DataHoldBypass(a._2, RegNext(f1_fire)).asBool))
  val f2_except    = VecInit((0 until 2).map{i => f2_except_pf(i) || f2_except_af(i)})
  val f2_has_except = f2_valid && (f2_except_af.reduce(_||_) || f2_except_pf.reduce(_||_))
  val f2_mmio      = io.pmp(0).resp.mmio && !f2_except_af(0) && !f2_except_pf(0) && f2_valid

  f2_ready := (f3_ready && f2_fetchFinish) || !f2_valid


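  // Send the (up to) two physical fetch addresses to the PMP checkers; the access-fault
  // and mmio results are folded into f2_except_af / f2_mmio above.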
  io.pmp.zipWithIndex.foreach { case (p, i) =>
    p.req.valid := f2_fire
    p.req.bits.addr := f2_pAddrs(i)
    p.req.bits.size := 3.U // TODO
    p.req.bits.cmd := TlbCmd.exec
  }

  // instruction miss wait-state machine
  val wait_idle :: wait_queue_ready :: wait_send_req :: wait_two_resp :: wait_0_resp :: wait_1_resp :: wait_one_resp :: wait_finish :: Nil = Enum(8)
  val wait_state = RegInit(wait_idle)

  fromMissQueue.foreach{port => port.ready := true.B}

  val (miss0_resp, miss1_resp) = (fromMissQueue(0).fire(), fromMissQueue(1).fire())
  val (bank0_fix, bank1_fix)   = (miss0_resp && !f2_bank_hit(0), miss1_resp && f2_doubleLine && !f2_bank_hit(1))

  val  only_0_miss  = f2_valid && !f2_hit && !f2_doubleLine && !f2_has_except && !f2_mmio
  val  only_0_hit   = f2_valid && f2_hit && !f2_doubleLine  && !f2_mmio
  val  hit_0_hit_1  = f2_valid && f2_hit && f2_doubleLine   && !f2_mmio
  val (hit_0_miss_1, miss_0_hit_1, miss_0_miss_1) = ((f2_valid && !f2_bank_hit(1) && f2_bank_hit(0) && f2_doubleLine  && !f2_has_except && !f2_mmio),
                                                     (f2_valid && !f2_bank_hit(0) && f2_bank_hit(1) && f2_doubleLine  && !f2_has_except && !f2_mmio),
                                                     (f2_valid && !f2_bank_hit(0) && !f2_bank_hit(1) && f2_doubleLine && !f2_has_except && !f2_mmio))

  val  hit_0_except_1  = f2_valid && f2_doubleLine &&  !f2_except(0) && f2_except(1)  &&  f2_bank_hit(0)
  val  miss_0_except_1 = f2_valid && f2_doubleLine &&  !f2_except(0) && f2_except(1)  && !f2_bank_hit(0)
  //val  fetch0_except_1 = hit_0_except_1 || miss_0_except_1
  val  except_0        = f2_valid && f2_except(0)

  val f2_mq_datas     = Reg(Vec(2, UInt(blockBits.W)))

  when(fromMissQueue(0).fire()) {f2_mq_datas(0) := fromMissQueue(0).bits.data}
  when(fromMissQueue(1).fire()) {f2_mq_datas(1) := fromMissQueue(1).bits.data}

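  // Miss wait state machine: once the required miss-queue port(s) are ready, send the
  // refill request(s), wait for one or two responses, then hold in wait_finish until
  // f2 fires; f2_flush resets it to wait_idle.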
  switch(wait_state){
    is(wait_idle){
      when(miss_0_except_1){
        wait_state := Mux(toMissQueue(0).ready, wait_queue_ready, wait_idle)
      }.elsewhen(only_0_miss || miss_0_hit_1){
        wait_state := Mux(toMissQueue(0).ready, wait_queue_ready, wait_idle)
      }.elsewhen(hit_0_miss_1){
        wait_state := Mux(toMissQueue(1).ready, wait_queue_ready, wait_idle)
      }.elsewhen(miss_0_miss_1){
        wait_state := Mux(toMissQueue(0).ready && toMissQueue(1).ready, wait_queue_ready, wait_idle)
      }
    }

    //TODO: naive logic for wait icache response
    is(wait_queue_ready){
      wait_state := wait_send_req
    }

    is(wait_send_req) {
      when(miss_0_except_1 || only_0_miss || hit_0_miss_1 || miss_0_hit_1){
        wait_state := wait_one_resp
      }.elsewhen(miss_0_miss_1){
        wait_state := wait_two_resp
      }
    }

    is(wait_one_resp) {
      when((miss_0_except_1 || only_0_miss || miss_0_hit_1) && fromMissQueue(0).fire()){
        wait_state := wait_finish
      }.elsewhen(hit_0_miss_1 && fromMissQueue(1).fire()){
        wait_state := wait_finish
      }
    }

    is(wait_two_resp) {
      when(fromMissQueue(0).fire() && fromMissQueue(1).fire()){
        wait_state := wait_finish
      }.elsewhen(!fromMissQueue(0).fire() && fromMissQueue(1).fire()){
        wait_state := wait_0_resp
      }.elsewhen(fromMissQueue(0).fire() && !fromMissQueue(1).fire()){
        wait_state := wait_1_resp
      }
    }

    is(wait_0_resp) {
      when(fromMissQueue(0).fire()){
        wait_state := wait_finish
      }
    }

    is(wait_1_resp) {
      when(fromMissQueue(1).fire()){
        wait_state := wait_finish
      }
    }

    is(wait_finish) {
      when(f2_fire) {wait_state := wait_idle }
    }
  }

  when(f2_flush) { wait_state := wait_idle }

  (0 until 2).foreach { i =>
    if (i == 1) toMissQueue(i).valid := (hit_0_miss_1 || miss_0_miss_1) && wait_state === wait_queue_ready
    else        toMissQueue(i).valid := (only_0_miss || miss_0_hit_1 || miss_0_miss_1 || miss_0_except_1) && wait_state === wait_queue_ready
    toMissQueue(i).bits.addr    := f2_pAddrs(i)
    toMissQueue(i).bits.vSetIdx := f2_vSetIdx(i)
    toMissQueue(i).bits.waymask := f2_waymask(i)
    toMissQueue(i).bits.clientID := 0.U
  }


  val miss_all_fix       = (wait_state === wait_finish)

  f2_fetchFinish         := ((f2_valid && f2_hit) || (f2_valid && f2_mmio) || miss_all_fix || hit_0_except_1 || except_0)

  XSPerfAccumulate("ifu_bubble_f2_miss",    f2_valid && !f2_fetchFinish )

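  // Replacement training: touch entry 0 records the way that hit in f1, touch entry 1
  // records the victim way allocated for a miss in f2.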
  (touch_ways zip touch_sets).zipWithIndex.foreach{ case((t_w, t_s), i) =>
    t_s(0)         := f1_vSetIdx(i)
    t_w(0).valid   := f1_bank_hit(i)
    t_w(0).bits    := OHToUInt(f1_bank_hit_vec(i))

    t_s(1)         := f2_vSetIdx(i)
    t_w(1).valid   := f2_valid && !f2_bank_hit(i)
    t_w(1).bits    := OHToUInt(f2_waymask(i))
  }

  val sec_miss_reg   = RegInit(0.U.asTypeOf(Vec(4, Bool())))
  val reservedRefillData = Reg(Vec(2, UInt(blockBits.W)))
  val f2_hit_datas    = RegEnable(next = f1_hit_data, enable = f1_fire)
  val f2_datas        = Wire(Vec(2, UInt(blockBits.W)))

  f2_datas.zipWithIndex.foreach{case(bank, i) =>
    if(i == 0) bank := Mux(f2_bank_hit(i), f2_hit_datas(i), Mux(sec_miss_reg(2), reservedRefillData(1), Mux(sec_miss_reg(0), reservedRefillData(0), f2_mq_datas(i))))
    else       bank := Mux(f2_bank_hit(i), f2_hit_datas(i), Mux(sec_miss_reg(3), reservedRefillData(1), Mux(sec_miss_reg(1), reservedRefillData(0), f2_mq_datas(i))))
  }

  val f2_jump_valids          = Fill(PredictWidth, !preDecoderOut.cfiOffset.valid)   | Fill(PredictWidth, 1.U(1.W)) >> (~preDecoderOut.cfiOffset.bits)
  val f2_predecode_valids     = VecInit(preDecoderOut.pd.map(instr => instr.valid)).asUInt & f2_jump_valids

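  // Slice the two concatenated cachelines into PredictWidth (+1 with RVC) parcels,
  // starting at startAddr's halfword (or word) offset within the first line.
  // e.g. (illustrative, 64-byte lines): an offset of 0x3a within the line makes
  // startPtr = 29, so parcel 0 is halfword 29 of the concatenated data.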
  def cut(cacheline: UInt, start: UInt): Vec[UInt] = {
    if(HasCExtension){
      val result   = Wire(Vec(PredictWidth + 1, UInt(16.W)))
      val dataVec  = cacheline.asTypeOf(Vec(blockBytes * 2 / 2, UInt(16.W)))
      val startPtr = Cat(0.U(1.W), start(blockOffBits-1, 1))
      (0 until PredictWidth + 1).foreach( i =>
        result(i) := dataVec(startPtr + i.U)
      )
      result
    } else {
      val result   = Wire(Vec(PredictWidth, UInt(32.W)))
      val dataVec  = cacheline.asTypeOf(Vec(blockBytes * 2 / 4, UInt(32.W)))
      val startPtr = Cat(0.U(1.W), start(blockOffBits-1, 2))
      (0 until PredictWidth).foreach( i =>
        result(i) := dataVec(startPtr + i.U)
      )
      result
    }
  }

  val f2_cut_data = cut(Cat(f2_datas.map(cacheline => cacheline.asUInt).reverse).asUInt, f2_ftq_req.startAddr)

  // Deal with a secondary miss in f1: if the request in f1 needs a cacheline that is
  // currently being refilled for f2, keep the refill data in reservedRefillData so f1
  // can reuse it instead of missing on the same line again.
  val f2_0_f1_0 =   ((f2_valid && !f2_bank_hit(0)) && f1_valid && (get_block_addr(f2_ftq_req.startAddr) === get_block_addr(f1_ftq_req.startAddr)))
  val f2_0_f1_1 =   ((f2_valid && !f2_bank_hit(0)) && f1_valid && f1_doubleLine && (get_block_addr(f2_ftq_req.startAddr) === get_block_addr(f1_ftq_req.startAddr + blockBytes.U)))
  val f2_1_f1_0 =   ((f2_valid && !f2_bank_hit(1) && f2_doubleLine) && f1_valid && (get_block_addr(f2_ftq_req.startAddr + blockBytes.U) === get_block_addr(f1_ftq_req.startAddr)))
  val f2_1_f1_1 =   ((f2_valid && !f2_bank_hit(1) && f2_doubleLine) && f1_valid && f1_doubleLine && (get_block_addr(f2_ftq_req.startAddr + blockBytes.U) === get_block_addr(f1_ftq_req.startAddr + blockBytes.U)))

  val isSameLine     = f2_0_f1_0 || f2_0_f1_1 || f2_1_f1_0 || f2_1_f1_1
  val sec_miss_sit   = VecInit(Seq(f2_0_f1_0, f2_0_f1_1, f2_1_f1_0, f2_1_f1_1))
  val hasSecMiss     = RegInit(false.B)

  when(f2_flush){
    sec_miss_reg.foreach(sig => sig := false.B)
    hasSecMiss := false.B
  }.elsewhen(isSameLine && !f1_flush && f2_fire){
    sec_miss_reg.zipWithIndex.foreach{case(sig, i) => sig := sec_miss_sit(i)}
    hasSecMiss := true.B
  }.elsewhen((!isSameLine || f1_flush) && hasSecMiss && f2_fire){
    sec_miss_reg.foreach(sig => sig := false.B)
    hasSecMiss := false.B
  }

  when((f2_0_f1_0 || f2_0_f1_1) && f2_fire){
    reservedRefillData(0) := f2_mq_datas(0)
  }

  when((f2_1_f1_0 || f2_1_f1_1) && f2_fire){
    reservedRefillData(1) := f2_mq_datas(1)
  }


  //---------------------------------------------
  //  Fetch Stage 4 (f3):
  //  * Send the cut instruction data to PreDecode
  //  * Check whether the prediction was right (branch target and type, jump direction
  //    and type, jal target)
  //  * Handle MMIO instruction fetch through the uncache port
  //  * Write the predecode result back to the FTQ and send instructions to the Ibuffer
  //---------------------------------------------
  val f3_valid          = RegInit(false.B)
  val f3_ftq_req        = RegEnable(next = f2_ftq_req,    enable = f2_fire)
  val f3_situation      = RegEnable(next = f2_situation,  enable = f2_fire)
  val f3_doubleLine     = RegEnable(next = f2_doubleLine, enable = f2_fire)

  val f3_cut_data       = RegEnable(next = f2_cut_data,  enable = f2_fire)
  val f3_except_pf      = RegEnable(next = f2_except_pf, enable = f2_fire)
  val f3_except_af      = RegEnable(next = f2_except_af, enable = f2_fire)
  val f3_hit            = RegEnable(next = f2_hit   ,    enable = f2_fire)
  val f3_mmio           = RegEnable(next = f2_mmio  ,    enable = f2_fire)

  //assert((f3_ftq_req.startAddr + 34.U) >= f3_ftq_req.fallThruAddr, "Fall through address exceeds the limit")

  val f3_lastHalf       = RegInit(0.U.asTypeOf(new LastHalfInfo))
  val f3_lastHalfMatch  = f3_lastHalf.matchThisBlock(f3_ftq_req.startAddr)
  val f3_except         = VecInit((0 until 2).map{i => f3_except_pf(i) || f3_except_af(i)})
  val f3_has_except     = f3_valid && (f3_except_af.reduce(_||_) || f3_except_pf.reduce(_||_))
  val f3_pAddrs         = RegEnable(next = f2_pAddrs, enable = f2_fire)

  val f3_mmio_data      = Reg(UInt(maxInstrLen.W))

  val f3_data = if(HasCExtension) Wire(Vec(PredictWidth + 1, UInt(16.W))) else Wire(Vec(PredictWidth, UInt(32.W)))
  f3_data := f3_cut_data

  // performance counters
  val f3_only_0_hit     = RegEnable(next = only_0_hit,    enable = f2_fire)
  val f3_only_0_miss    = RegEnable(next = only_0_miss,   enable = f2_fire)
  val f3_hit_0_hit_1    = RegEnable(next = hit_0_hit_1,   enable = f2_fire)
  val f3_hit_0_miss_1   = RegEnable(next = hit_0_miss_1,  enable = f2_fire)
  val f3_miss_0_hit_1   = RegEnable(next = miss_0_hit_1,  enable = f2_fire)
  val f3_miss_0_miss_1  = RegEnable(next = miss_0_miss_1, enable = f2_fire)

  val mmio_idle :: mmio_send_req :: mmio_w_resp :: mmio_resend :: mmio_resend_w_resp :: mmio_w_commit :: Nil = Enum(6)
  val mmio_state = RegInit(mmio_idle)

  val f3_req_is_mmio     = f3_mmio && f3_valid
  val mmio_has_commited  = VecInit(io.rob_commits.map{commit => commit.valid && commit.bits.ftqIdx === f3_ftq_req.ftqIdx && commit.bits.ftqOffset === 0.U}).asUInt.orR
  val f3_mmio_req_commit = f3_req_is_mmio && mmio_state === mmio_w_commit && mmio_has_commited

  val f3_mmio_to_commit      = f3_req_is_mmio && mmio_state === mmio_w_commit
  val f3_mmio_to_commit_next = RegNext(f3_mmio_to_commit)
  val f3_mmio_can_go         = f3_mmio_to_commit && !f3_mmio_to_commit_next

  val f3_ftq_flush_self = fromFtq.redirect.valid && RedirectLevel.flushItself(fromFtq.redirect.bits.level)

  val f3_need_not_flush = f3_req_is_mmio && fromFtq.redirect.valid && !f3_ftq_flush_self

  when(f3_flush && !f3_need_not_flush)               {f3_valid := false.B}
  .elsewhen(f2_fire && !f2_flush)                    {f3_valid := true.B }
  .elsewhen(io.toIbuffer.fire() && !f3_req_is_mmio)  {f3_valid := false.B}
  .elsewhen(f3_req_is_mmio && f3_mmio_req_commit)    {f3_valid := false.B}

  val f3_mmio_use_seq_pc = RegInit(false.B)

  val (redirect_ftqIdx, redirect_ftqOffset) = (fromFtq.redirect.bits.ftqIdx, fromFtq.redirect.bits.ftqOffset)
  val redirect_mmio_req = fromFtq.redirect.valid && redirect_ftqIdx === f3_ftq_req.ftqIdx && redirect_ftqOffset === 0.U

  when(RegNext(f2_fire && !f2_flush) && f3_req_is_mmio)        { f3_mmio_use_seq_pc := true.B  }
  .elsewhen(redirect_mmio_req)                                 { f3_mmio_use_seq_pc := false.B }

  f3_ready := Mux(f3_req_is_mmio, io.toIbuffer.ready && f3_mmio_req_commit || !f3_valid, io.toIbuffer.ready || !f3_valid)

  when(f3_req_is_mmio){
    f3_data(0) := f3_mmio_data(15, 0)
    f3_data(1) := f3_mmio_data(31, 16)
  }

  when(fromUncache.fire())    {f3_mmio_data := fromUncache.bits.data}


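  // MMIO fetch state machine: issue an uncache read for the instruction; after the first
  // response, if the low halfword decodes as RVC, resend at pAddr + 2 to fetch another
  // halfword; then wait in mmio_w_commit until the ROB has committed this instruction.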
  switch(mmio_state){
    is(mmio_idle){
      when(f3_req_is_mmio){
        mmio_state := mmio_send_req
      }
    }

    is(mmio_send_req){
      mmio_state := Mux(toUncache.fire(), mmio_w_resp, mmio_send_req)
    }

    is(mmio_w_resp){
      when(fromUncache.fire()){
        val isRVC = fromUncache.bits.data(1, 0) =/= 3.U
        mmio_state := Mux(isRVC, mmio_resend, mmio_w_commit)
      }
    }

    is(mmio_resend){
      mmio_state := Mux(toUncache.fire(), mmio_resend_w_resp, mmio_resend)
    }

    is(mmio_resend_w_resp){
      when(fromUncache.fire()){
        mmio_state := mmio_w_commit
      }
    }

    is(mmio_w_commit){
      when(mmio_has_commited){
        mmio_state := mmio_idle
      }
    }
  }

  when(f3_ftq_flush_self)  {
    mmio_state   := mmio_idle
    f3_mmio_data := 0.U
  }

  toUncache.valid     := ((mmio_state === mmio_send_req) || (mmio_state === mmio_resend)) && f3_req_is_mmio
  toUncache.bits.addr := Mux((mmio_state === mmio_resend), f3_pAddrs(0) + 2.U, f3_pAddrs(0))
  fromUncache.ready   := true.B

  val f3_bank_hit = RegEnable(next = f2_bank_hit, enable = f2_fire)
  val f3_req_0 = io.toIbuffer.fire()
  val f3_req_1 = io.toIbuffer.fire() && f3_doubleLine
  val f3_hit_0 = io.toIbuffer.fire() && f3_bank_hit(0)
  val f3_hit_1 = io.toIbuffer.fire() && f3_doubleLine && f3_bank_hit(1)

  preDecoderIn.instValid     :=  f3_valid && !f3_has_except
  preDecoderIn.data          :=  f3_data
  preDecoderIn.startAddr     :=  f3_ftq_req.startAddr
  preDecoderIn.fallThruAddr  :=  f3_ftq_req.fallThruAddr
  preDecoderIn.fallThruError :=  f3_ftq_req.fallThruError
  preDecoderIn.isDoubleLine  :=  f3_doubleLine
  preDecoderIn.ftqOffset     :=  f3_ftq_req.ftqOffset
  preDecoderIn.target        :=  f3_ftq_req.target
  preDecoderIn.oversize      :=  f3_ftq_req.oversize
  preDecoderIn.lastHalfMatch :=  f3_lastHalfMatch
  preDecoderIn.pageFault     :=  f3_except_pf
  preDecoderIn.accessFault   :=  f3_except_af
  preDecoderIn.mmio          :=  f3_mmio
  preDecoderIn.frontendTrigger := io.frontendTrigger
  preDecoderIn.csrTriggerEnable := io.csrTriggerEnable


  // TODO: What if next packet does not match?
  when (f3_flush) {
    f3_lastHalf.valid := false.B
  }.elsewhen (io.toIbuffer.fire()) {
    f3_lastHalf.valid := preDecoderOut.hasLastHalf
    f3_lastHalf.middlePC := preDecoderOut.realEndPC
  }

  val f3_predecode_range = VecInit(preDecoderOut.pd.map(inst => inst.valid)).asUInt
  val f3_mmio_range      = VecInit((0 until PredictWidth).map(i => if(i == 0) true.B else false.B))

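  // Send the fetch packet to the Ibuffer; for an MMIO fetch only the first instruction
  // slot is valid, and the packet is held until the MMIO access is allowed to proceed.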
  io.toIbuffer.valid          := f3_valid && (!f3_req_is_mmio || f3_mmio_can_go)
  io.toIbuffer.bits.instrs    := preDecoderOut.instrs
  io.toIbuffer.bits.valid     := Mux(f3_req_is_mmio, f3_mmio_range.asUInt, f3_predecode_range & preDecoderOut.instrRange.asUInt)
  io.toIbuffer.bits.pd        := preDecoderOut.pd
  io.toIbuffer.bits.ftqPtr    := f3_ftq_req.ftqIdx
  io.toIbuffer.bits.pc        := preDecoderOut.pc
  io.toIbuffer.bits.ftqOffset.zipWithIndex.foreach{case(a, i) => a.bits := i.U; a.valid := preDecoderOut.takens(i) && !f3_req_is_mmio}
  io.toIbuffer.bits.foldpc    := preDecoderOut.pc.map(i => XORFold(i(VAddrBits-1, 1), MemPredPCWidth))
  io.toIbuffer.bits.ipf       := preDecoderOut.pageFault
  io.toIbuffer.bits.acf       := preDecoderOut.accessFault
  io.toIbuffer.bits.crossPageIPFFix := preDecoderOut.crossPageIPF
  io.toIbuffer.bits.triggered := preDecoderOut.triggered

  // Write the predecode result back to the FTQ
  val f3_cache_fetch     = f3_valid && !(f2_fire && !f2_flush)
  val finishFetchMaskReg = RegNext(f3_cache_fetch)


  val f3_mmio_missOffset = Wire(ValidUndirectioned(UInt(log2Ceil(PredictWidth).W)))
  f3_mmio_missOffset.valid := f3_req_is_mmio
  f3_mmio_missOffset.bits  := 0.U

  toFtq.pdWb.valid           := (!finishFetchMaskReg && f3_valid && !f3_req_is_mmio) || (f3_mmio_req_commit && f3_mmio_use_seq_pc)
  toFtq.pdWb.bits.pc         := preDecoderOut.pc
  toFtq.pdWb.bits.pd         := preDecoderOut.pd
  toFtq.pdWb.bits.pd.zipWithIndex.foreach{case(instr, i) => instr.valid := Mux(f3_req_is_mmio, f3_mmio_range(i), f3_predecode_range(i))}
  toFtq.pdWb.bits.ftqIdx     := f3_ftq_req.ftqIdx
  toFtq.pdWb.bits.ftqOffset  := f3_ftq_req.ftqOffset.bits
  toFtq.pdWb.bits.misOffset  := Mux(f3_req_is_mmio, f3_mmio_missOffset, preDecoderOut.misOffset)
  toFtq.pdWb.bits.cfiOffset  := preDecoderOut.cfiOffset
  toFtq.pdWb.bits.target     := Mux(f3_req_is_mmio, Mux(f3_mmio_data(1, 0) =/= 3.U, f3_ftq_req.startAddr + 2.U, f3_ftq_req.startAddr + 4.U), preDecoderOut.target)
  toFtq.pdWb.bits.jalTarget  := preDecoderOut.jalTarget
  toFtq.pdWb.bits.instrRange := Mux(f3_req_is_mmio, f3_mmio_range, preDecoderOut.instrRange)

  val predecodeFlush     = preDecoderOut.misOffset.valid && f3_valid
  val predecodeFlushReg  = RegNext(predecodeFlush && !(f2_fire && !f2_flush))

  val perfinfo = IO(new Bundle(){
    val perfEvents = Output(new PerfEventsBundle(15))
  })

  val perfEvents = Seq(
    ("frontendFlush                ", f3_redirect                                ),
    ("ifu_req                      ", io.toIbuffer.fire()                        ),
    ("ifu_miss                     ", io.toIbuffer.fire() && !f3_hit             ),
    ("ifu_req_cacheline_0          ", f3_req_0                                   ),
    ("ifu_req_cacheline_1          ", f3_req_1                                   ),
    ("ifu_req_cacheline_0_hit      ", f3_hit_0                                   ),
    ("ifu_req_cacheline_1_hit      ", f3_hit_1                                   ),
    ("only_0_hit                   ", f3_only_0_hit       && io.toIbuffer.fire() ),
    ("only_0_miss                  ", f3_only_0_miss      && io.toIbuffer.fire() ),
    ("hit_0_hit_1                  ", f3_hit_0_hit_1      && io.toIbuffer.fire() ),
    ("hit_0_miss_1                 ", f3_hit_0_miss_1     && io.toIbuffer.fire() ),
    ("miss_0_hit_1                 ", f3_miss_0_hit_1     && io.toIbuffer.fire() ),
    ("miss_0_miss_1                ", f3_miss_0_miss_1    && io.toIbuffer.fire() ),
    ("cross_line_block             ", io.toIbuffer.fire() && f3_situation(0)     ),
    ("fall_through_is_cacheline_end", io.toIbuffer.fire() && f3_situation(1)     )
  )

  for (((perf_out, (perf_name, perf)), i) <- perfinfo.perfEvents.perf_events.zip(perfEvents).zipWithIndex) {
    perf_out.incr_step := RegNext(perf)
  }

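  // Redirect the frontend when predecode finds a misprediction that was not already
  // reported last cycle, or when a committed MMIO fetch must continue at the sequential PC.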
  f3_redirect := (!predecodeFlushReg && predecodeFlush && !f3_req_is_mmio) || (f3_mmio_req_commit && f3_mmio_use_seq_pc)

  XSPerfAccumulate("ifu_req",   io.toIbuffer.fire() )
  XSPerfAccumulate("ifu_miss",  io.toIbuffer.fire() && !f3_hit )
  XSPerfAccumulate("ifu_req_cacheline_0", f3_req_0  )
  XSPerfAccumulate("ifu_req_cacheline_1", f3_req_1  )
  XSPerfAccumulate("ifu_req_cacheline_0_hit",   f3_hit_0 )
  XSPerfAccumulate("ifu_req_cacheline_1_hit",   f3_hit_1 )
  XSPerfAccumulate("frontendFlush",  f3_redirect )
  XSPerfAccumulate("only_0_hit",      f3_only_0_hit   && io.toIbuffer.fire()  )
  XSPerfAccumulate("only_0_miss",     f3_only_0_miss  && io.toIbuffer.fire()  )
  XSPerfAccumulate("hit_0_hit_1",     f3_hit_0_hit_1  && io.toIbuffer.fire()  )
  XSPerfAccumulate("hit_0_miss_1",    f3_hit_0_miss_1 && io.toIbuffer.fire()  )
  XSPerfAccumulate("miss_0_hit_1",    f3_miss_0_hit_1  && io.toIbuffer.fire() )
  XSPerfAccumulate("miss_0_miss_1",   f3_miss_0_miss_1 && io.toIbuffer.fire() )
  XSPerfAccumulate("cross_line_block", io.toIbuffer.fire() && f3_situation(0) )
  XSPerfAccumulate("fall_through_is_cacheline_end", io.toIbuffer.fire() && f3_situation(1) )
}