/***************************************************************************************
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/

package xiangshan.frontend.icache

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import difftest._
import freechips.rocketchip.tilelink._
import utils._
import xiangshan.cache.mmu._
import xiangshan.frontend._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}
import huancun.PreferCacheKey
import xiangshan.XSCoreParamsKey
import utility._

abstract class IPrefetchBundle(implicit p: Parameters) extends ICacheBundle
abstract class IPrefetchModule(implicit p: Parameters) extends ICacheModule

class IPrefetchIO(implicit p: Parameters) extends IPrefetchBundle {
  // control
  val csr_pf_enable     = Input(Bool())
  val csr_parity_enable = Input(Bool())
  val flush             = Input(Bool())

  val ftqReq            = Flipped(new FtqToPrefetchIO)
  val itlb              = Vec(PortNumber, new TlbRequestIO)
  val pmp               = Vec(PortNumber, new ICachePMPBundle)
  val metaRead          = new ICacheMetaReqBundle
  val MSHRReq           = DecoupledIO(new ICacheMissReq)
  val MSHRResp          = Flipped(ValidIO(new ICacheMissResp))
  val wayLookupWrite    = DecoupledIO(new WayLookupInfo)
}

class IPrefetchPipe(implicit p: Parameters) extends IPrefetchModule
{
  val io: IPrefetchIO = IO(new IPrefetchIO)

  val fromFtq = io.ftqReq
  val (toITLB, fromITLB) = (io.itlb.map(_.req), io.itlb.map(_.resp))
  val (toPMP,  fromPMP)  = (io.pmp.map(_.req), io.pmp.map(_.resp))
  val (toMeta, fromMeta) = (io.metaRead.toIMeta, io.metaRead.fromIMeta)
  val (toMSHR, fromMSHR) = (io.MSHRReq, io.MSHRResp)
  val toWayLookup = io.wayLookupWrite

  val s0_fire, s1_fire, s2_fire            = WireInit(false.B)
  val s0_discard, s2_discard               = WireInit(false.B)
  val s0_ready, s1_ready, s2_ready         = WireInit(false.B)
  val s0_flush, s1_flush, s2_flush         = WireInit(false.B)
  val from_bpu_s0_flush, from_bpu_s1_flush = WireInit(false.B)
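
  // Per-stage handshake: sN_fire marks a request leaving stage N, sN_ready tells the previous
  // stage that stage N can accept a new request, and sN_flush kills the in-flight request
  // (triggered by io.flush or by a BPU redirect reported through fromFtq.flushFromBpu).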

  /**
    ******************************************************************************
    * IPrefetch Stage 0
    * - 1. receive ftq req
    * - 2. send req to ITLB
    * - 3. send req to Meta SRAM
    ******************************************************************************
    */
  val s0_valid = fromFtq.req.valid

  /**
    ******************************************************************************
    * receive ftq req
    ******************************************************************************
    */
  val s0_req_vaddr   = VecInit(Seq(fromFtq.req.bits.startAddr, fromFtq.req.bits.nextlineStart))
  val s0_req_ftqIdx  = fromFtq.req.bits.ftqIdx
  val s0_doubleline  = fromFtq.req.bits.crossCacheline
  val s0_req_vSetIdx = s0_req_vaddr.map(get_idx)

  from_bpu_s0_flush := fromFtq.flushFromBpu.shouldFlushByStage2(s0_req_ftqIdx) ||
                       fromFtq.flushFromBpu.shouldFlushByStage3(s0_req_ftqIdx)
  s0_flush := io.flush || from_bpu_s0_flush || s1_flush

  val s0_can_go = s1_ready && toITLB(0).ready && toITLB(1).ready && toMeta.ready
  fromFtq.req.ready := s0_can_go

  s0_fire := s0_valid && s0_can_go && !s0_flush

  /**
    ******************************************************************************
    * IPrefetch Stage 1
    * - 1. Receive resp from ITLB
    * - 2. Receive resp from IMeta and check
    * - 3. Monitor the requests from missUnit to write to SRAM.
    * - 4. Write wayLookup
    ******************************************************************************
    */
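  // Stage 1 may occupy more than one cycle: the FSM below (state/next_state) resends the ITLB
  // and meta-array requests when an ITLB port misses or the meta SRAM port is busy, and s1 is
  // released (s1_ready / s1_fire) only once next_state returns to m_idle.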
  val s1_valid = generatePipeControl(lastFire = s0_fire, thisFire = s1_fire, thisFlush = s1_flush, lastFlush = false.B)

  val s1_req_vaddr   = RegEnable(s0_req_vaddr, 0.U.asTypeOf(s0_req_vaddr), s0_fire)
  val s1_doubleline  = RegEnable(s0_doubleline, 0.U.asTypeOf(s0_doubleline), s0_fire)
  val s1_req_ftqIdx  = RegEnable(s0_req_ftqIdx, 0.U.asTypeOf(s0_req_ftqIdx), s0_fire)
  val s1_req_vSetIdx = VecInit(s1_req_vaddr.map(get_idx))

  val m_idle :: m_itlbResend :: m_metaResend :: m_enqWay :: m_enterS2 :: Nil = Enum(5)
  val state      = RegInit(m_idle)
  val next_state = WireDefault(state)
  val s0_fire_r  = RegNext(s0_fire)
  dontTouch(state)
  dontTouch(next_state)
  state := next_state

  /**
    ******************************************************************************
    * resend itlb req if miss
    ******************************************************************************
    */
  val s1_wait_itlb = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
  (0 until PortNumber).foreach { i =>
    when(s1_flush) {
      s1_wait_itlb(i) := false.B
    }.elsewhen(RegNext(s0_fire) && fromITLB(i).bits.miss) {
      s1_wait_itlb(i) := true.B
    }.elsewhen(s1_wait_itlb(i) && !fromITLB(i).bits.miss) {
      s1_wait_itlb(i) := false.B
    }
  }
  val s1_need_itlb    = VecInit(Seq((RegNext(s0_fire) || s1_wait_itlb(0)) && fromITLB(0).bits.miss,
                                    (RegNext(s0_fire) || s1_wait_itlb(1)) && fromITLB(1).bits.miss && s1_doubleline))
  val tlb_valid_pulse = VecInit(Seq((RegNext(s0_fire) || s1_wait_itlb(0)) && !fromITLB(0).bits.miss,
                                    (RegNext(s0_fire) || s1_wait_itlb(1)) && !fromITLB(1).bits.miss && s1_doubleline))
  val tlb_valid_latch = VecInit((0 until PortNumber).map(i => ValidHoldBypass(tlb_valid_pulse(i), s1_fire, flush = s1_flush)))
  val itlb_finish     = tlb_valid_latch(0) && (!s1_doubleline || tlb_valid_latch(1))

  for (i <- 0 until PortNumber) {
    toITLB(i).valid             := s1_need_itlb(i) || (s0_valid && (if (i == 0) true.B else s0_doubleline))
    toITLB(i).bits              := DontCare
    toITLB(i).bits.size         := 3.U
    toITLB(i).bits.vaddr        := Mux(s1_need_itlb(i), s1_req_vaddr(i), s0_req_vaddr(i))
    toITLB(i).bits.debug.pc     := Mux(s1_need_itlb(i), s1_req_vaddr(i), s0_req_vaddr(i))
    toITLB(i).bits.cmd          := TlbCmd.exec
    toITLB(i).bits.no_translate := false.B
  }
  fromITLB.foreach(_.ready := true.B)
  io.itlb.foreach(_.req_kill := false.B)

  /**
    ******************************************************************************
    * Receive resp from ITLB
    ******************************************************************************
    */
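  // The ITLB response is only guaranteed valid in the cycle tlb_valid_pulse(i) is high, so the
  // translated paddr/gpaddr and the itlb exception are captured into registers (RegEnable /
  // ResultHoldBypass), while the live wire value is bypassed in that same cycle.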
  val s1_req_paddr_wire = VecInit(fromITLB.map(_.bits.paddr(0)))
  val s1_req_paddr_reg = VecInit((0 until PortNumber).map( i =>
    RegEnable(s1_req_paddr_wire(i), 0.U(PAddrBits.W), tlb_valid_pulse(i))
  ))
  val s1_req_paddr = VecInit((0 until PortNumber).map( i =>
    Mux(tlb_valid_pulse(i), s1_req_paddr_wire(i), s1_req_paddr_reg(i))
  ))
  val s1_req_gpaddr_tmp = VecInit((0 until PortNumber).map( i =>
    ResultHoldBypass(valid = tlb_valid_pulse(i), init = 0.U.asTypeOf(fromITLB(i).bits.gpaddr(0)), data = fromITLB(i).bits.gpaddr(0))
  ))
  val s1_itlb_exception = VecInit((0 until PortNumber).map( i =>
    ResultHoldBypass(valid = tlb_valid_pulse(i), init = 0.U(ExceptionType.width.W), data = ExceptionType.fromTlbResp(fromITLB(i).bits))
  ))
  val s1_itlb_exception_gpf = VecInit(s1_itlb_exception.map(_ === ExceptionType.gpf))

  /* Select gpaddr with the first gpf
   * Note: the backend wants the base guest physical address of a fetch block
   *       for port(i), its base gpaddr is actually (gpaddr - i * blocksize)
   * see GPAMem: https://github.com/OpenXiangShan/XiangShan/blob/344cf5d55568dd40cd658a9ee66047a505eeb504/src/main/scala/xiangshan/backend/GPAMem.scala#L33-L34
   * see also: https://github.com/OpenXiangShan/XiangShan/blob/344cf5d55568dd40cd658a9ee66047a505eeb504/src/main/scala/xiangshan/frontend/IFU.scala#L374-L375
   */
  val s1_req_gpaddr = PriorityMuxDefault(
    s1_itlb_exception_gpf zip (0 until PortNumber).map(i => s1_req_gpaddr_tmp(i) - (i << blockOffBits).U),
    0.U.asTypeOf(s1_req_gpaddr_tmp(0))
  )

  /**
    ******************************************************************************
    * resend metaArray read req when itlb miss finish
    ******************************************************************************
    */
  val s1_need_meta = ((state === m_itlbResend) && itlb_finish) || (state === m_metaResend)
  toMeta.valid             := s1_need_meta || s0_valid
  toMeta.bits              := DontCare
  toMeta.bits.isDoubleLine := Mux(s1_need_meta, s1_doubleline, s0_doubleline)

  for (i <- 0 until PortNumber) {
    toMeta.bits.vSetIdx(i) := Mux(s1_need_meta, s1_req_vSetIdx(i), s0_req_vSetIdx(i))
  }

  /**
    ******************************************************************************
    * Receive resp from IMeta and check
    ******************************************************************************
    */
  val s1_req_ptags = VecInit(s1_req_paddr.map(get_phy_tag))

  val s1_meta_ptags  = fromMeta.tags
  val s1_meta_valids = fromMeta.entryValid
  // If an error is found in any way, the tag_eq_vec is unreliable, so we do not use the waymask, but directly .orR the error bits
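  // The per-way error bits are collapsed into one corrupt flag per port; it is forwarded to
  // WayLookup (meta_corrupt) and, when csr_parity_enable is set, later turned into an access
  // fault via s1_meta_exception.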
  val s1_meta_corrupt = VecInit(fromMeta.errors.map(_.asUInt.orR))

  def get_waymask(paddrs: Vec[UInt]): Vec[UInt] = {
    val ptags         = paddrs.map(get_phy_tag)
    val tag_eq_vec    = VecInit((0 until PortNumber).map( p => VecInit((0 until nWays).map( w => s1_meta_ptags(p)(w) === ptags(p)))))
    val tag_match_vec = VecInit((0 until PortNumber).map( k => VecInit(tag_eq_vec(k).zipWithIndex.map{ case (way_tag_eq, w) => way_tag_eq && s1_meta_valids(k)(w) })))
    val waymasks      = VecInit(tag_match_vec.map(_.asUInt))
    waymasks
  }

  val s1_SRAM_waymasks = VecInit((0 until PortNumber).map(i =>
    Mux(tlb_valid_pulse(i), get_waymask(s1_req_paddr_wire)(i), get_waymask(s1_req_paddr_reg)(i))))

  /**
    ******************************************************************************
    * update waymask according to MSHR update data
    ******************************************************************************
    */
  def update_waymask(mask: UInt, vSetIdx: UInt, ptag: UInt): UInt = {
    require(mask.getWidth == nWays)
    val new_mask  = WireInit(mask)
    val valid     = fromMSHR.valid && !fromMSHR.bits.corrupt
    val vset_same = fromMSHR.bits.vSetIdx === vSetIdx
    val ptag_same = getPhyTagFromBlk(fromMSHR.bits.blkPaddr) === ptag
    val way_same  = fromMSHR.bits.waymask === mask
    when(valid && vset_same) {
      when(ptag_same) {
        new_mask := fromMSHR.bits.waymask
      }.elsewhen(way_same) {
        new_mask := 0.U
      }
    }
    new_mask
  }

  val s1_SRAM_valid = s0_fire_r || RegNext(s1_need_meta && toMeta.ready)
  val s1_MSHR_valid = fromMSHR.valid && !fromMSHR.bits.corrupt
  val s1_waymasks   = WireInit(VecInit(Seq.fill(PortNumber)(0.U(nWays.W))))
  val s1_waymasks_r = RegEnable(s1_waymasks, 0.U.asTypeOf(s1_waymasks), s1_SRAM_valid || s1_MSHR_valid)
  (0 until PortNumber).foreach { i =>
    val old_waymask = Mux(s1_SRAM_valid, s1_SRAM_waymasks(i), s1_waymasks_r(i))
    s1_waymasks(i) := update_waymask(old_waymask, s1_req_vSetIdx(i), s1_req_ptags(i))
  }

  /**
    ******************************************************************************
    * send enqueue req to WayLookup
    ******************************************************************************
    */
  // Disallow enqueuing wayLookup when SRAM write occurs.
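  // Enqueue happens either directly from m_idle (single-cycle path, once itlb_finish) or from
  // m_enqWay after ITLB/meta resends; it is additionally blocked in the cycle a missUnit
  // response arrives (fromMSHR.valid), i.e. while the miss fill is being written to the SRAM.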
  toWayLookup.valid := ((state === m_enqWay) || ((state === m_idle) && itlb_finish)) && !s1_flush && !fromMSHR.valid
  toWayLookup.bits.vSetIdx := s1_req_vSetIdx
  toWayLookup.bits.waymask := s1_waymasks
  toWayLookup.bits.ptag    := s1_req_ptags
  toWayLookup.bits.gpaddr  := s1_req_gpaddr
  (0 until PortNumber).foreach { i =>
    val excpValid = (if (i == 0) true.B else s1_doubleline) // exception in first line is always valid; in second line it is valid only for doubleline requests
    // Send s1_itlb_exception to WayLookup (instead of s1_exception_out) for better timing. Will check pmp again in mainPipe.
    toWayLookup.bits.itlb_exception(i) := Mux(excpValid, s1_itlb_exception(i), ExceptionType.none)
    toWayLookup.bits.meta_corrupt(i)   := excpValid && s1_meta_corrupt(i)
  }

  val s1_waymasks_vec = s1_waymasks.map(_.asTypeOf(Vec(nWays, Bool())))
  when(toWayLookup.fire) {
    assert(PopCount(s1_waymasks_vec(0)) <= 1.U && (PopCount(s1_waymasks_vec(1)) <= 1.U || !s1_doubleline),
      "Multiple hit in main pipe, port0:is=%d,ptag=0x%x,vidx=0x%x,vaddr=0x%x port1:is=%d,ptag=0x%x,vidx=0x%x,vaddr=0x%x ",
      PopCount(s1_waymasks_vec(0)) > 1.U, s1_req_ptags(0), get_idx(s1_req_vaddr(0)), s1_req_vaddr(0),
      PopCount(s1_waymasks_vec(1)) > 1.U && s1_doubleline, s1_req_ptags(1), get_idx(s1_req_vaddr(1)), s1_req_vaddr(1))
  }

  /**
    ******************************************************************************
    * PMP check
    ******************************************************************************
    */
  toPMP.zipWithIndex.foreach { case (p, i) =>
    // if itlb has exception, paddr can be invalid, therefore pmp check can be skipped
    p.valid     := s1_valid // && s1_itlb_exception === ExceptionType.none
    p.bits.addr := s1_req_paddr(i)
    p.bits.size := 3.U // TODO
    p.bits.cmd  := TlbCmd.exec
  }
  val s1_pmp_exception = VecInit(fromPMP.map(ExceptionType.fromPMPResp))
  val s1_mmio          = VecInit(fromPMP.map(_.mmio))

  // also raise af when meta array corrupt is detected, to cancel prefetch
  val s1_meta_exception = VecInit(s1_meta_corrupt.map(ExceptionType.fromECC(io.csr_parity_enable, _)))

  // merge s1 itlb/pmp/meta exceptions, itlb has the highest priority, pmp next, meta lowest
  val s1_exception_out = ExceptionType.merge(
    s1_itlb_exception,
    s1_pmp_exception,
    s1_meta_exception
  )

  /**
    ******************************************************************************
    * state machine
    ******************************************************************************
    */
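
  /* State machine for stage 1:
   * - m_idle:       default; a request can pass through s1 in one cycle if ITLB and meta respond in time
   * - m_itlbResend: at least one ITLB port missed; keep the request until translation finishes, then re-read meta
   * - m_metaResend: translation finished but the meta SRAM port was busy; retry the meta read
   * - m_enqWay:     wait to write the way information into WayLookup
   * - m_enterS2:    WayLookup written, but stage 2 is not ready yet; wait for s2_ready
   */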
  switch(state) {
    is(m_idle) {
      when(s1_valid && !itlb_finish) {
        next_state := m_itlbResend
      }.elsewhen(s1_valid && itlb_finish && !toWayLookup.fire) {
        next_state := m_enqWay
      }.elsewhen(s1_valid && itlb_finish && toWayLookup.fire && !s2_ready) {
        next_state := m_enterS2
      }
    }
    is(m_itlbResend) {
      when(itlb_finish && !toMeta.ready) {
        next_state := m_metaResend
      }.elsewhen(itlb_finish && toMeta.ready) {
        next_state := m_enqWay
      }
    }
    is(m_metaResend) {
      when(toMeta.ready) {
        next_state := m_enqWay
      }
    }
    is(m_enqWay) {
      when(toWayLookup.fire && !s2_ready) {
        next_state := m_enterS2
      }.elsewhen(toWayLookup.fire && s2_ready) {
        next_state := m_idle
      }
    }
    is(m_enterS2) {
      when(s2_ready) {
        next_state := m_idle
      }
    }
  }

  when(s1_flush) {
    next_state := m_idle
  }

  /** Stage 1 control */
  from_bpu_s1_flush := s1_valid && fromFtq.flushFromBpu.shouldFlushByStage3(s1_req_ftqIdx)
  s1_flush := io.flush || from_bpu_s1_flush

  s1_ready := next_state === m_idle
  s1_fire  := (next_state === m_idle) && s1_valid && !s1_flush // used to clear s1_valid & itlb_valid_latch
  val s1_real_fire = s1_fire && io.csr_pf_enable // real "s1 fire" that s1 enters s2

  /**
    ******************************************************************************
    * IPrefetch Stage 2
    * - 1. Monitor the requests from missUnit to write to SRAM.
    * - 2. send req to missUnit
    ******************************************************************************
    */
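  // Stage 2 is entered only on s1_real_fire, i.e. when prefetching is enabled (csr_pf_enable).
  // With prefetching disabled, s1 still completes and writes WayLookup, but no request is sent
  // to the missUnit.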
  val s2_valid = generatePipeControl(lastFire = s1_real_fire, thisFire = s2_fire, thisFlush = s2_flush, lastFlush = false.B)

  val s2_req_vaddr  = RegEnable(s1_req_vaddr, 0.U.asTypeOf(s1_req_vaddr), s1_real_fire)
  val s2_doubleline = RegEnable(s1_doubleline, 0.U.asTypeOf(s1_doubleline), s1_real_fire)
  val s2_req_paddr  = RegEnable(s1_req_paddr, 0.U.asTypeOf(s1_req_paddr), s1_real_fire)
  val s2_exception  = RegEnable(s1_exception_out, 0.U.asTypeOf(s1_exception_out), s1_real_fire) // includes itlb/pmp/meta exception
  val s2_mmio       = RegEnable(s1_mmio, 0.U.asTypeOf(s1_mmio), s1_real_fire)
  val s2_waymasks   = RegEnable(s1_waymasks, 0.U.asTypeOf(s1_waymasks), s1_real_fire)

  val s2_req_vSetIdx = s2_req_vaddr.map(get_idx)
  val s2_req_ptags   = s2_req_paddr.map(get_phy_tag)

  /**
    ******************************************************************************
    * Monitor the requests from missUnit to write to SRAM
    ******************************************************************************
    */

  /* NOTE: If fromMSHR.bits.corrupt, we should set s2_MSHR_hits to false.B, and send prefetch requests again.
   * This is the opposite of how mainPipe handles fromMSHR.bits.corrupt,
   * in which we should set s2_MSHR_hits to true.B, and send the error to the ifu.
   */
  val s2_MSHR_match = VecInit((0 until PortNumber).map(i =>
    (s2_req_vSetIdx(i) === fromMSHR.bits.vSetIdx) &&
    (s2_req_ptags(i) === getPhyTagFromBlk(fromMSHR.bits.blkPaddr)) &&
    s2_valid && fromMSHR.valid && !fromMSHR.bits.corrupt
  ))
  val s2_MSHR_hits = (0 until PortNumber).map(i => ValidHoldBypass(s2_MSHR_match(i), s2_fire || s2_flush))

  val s2_SRAM_hits = s2_waymasks.map(_.orR)
  val s2_hits = VecInit((0 until PortNumber).map(i => s2_MSHR_hits(i) || s2_SRAM_hits(i)))

  /* s2_exception includes itlb pf/gpf/af, pmp af and meta corruption (af), none of which should be prefetched
   * mmio should not be prefetched
   * also, if an earlier port has an exception, the later port should not be prefetched either
   */
  val s2_miss = VecInit((0 until PortNumber).map { i =>
    !s2_hits(i) && (if (i == 0) true.B else s2_doubleline) &&
      s2_exception.take(i + 1).map(_ === ExceptionType.none).reduce(_ && _) &&
      s2_mmio.take(i + 1).map(!_).reduce(_ && _)
  })

  /**
    ******************************************************************************
    * send req to missUnit
    ******************************************************************************
    */
  val toMSHRArbiter = Module(new Arbiter(new ICacheMissReq, PortNumber))

  // To avoid sending duplicate requests:
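  // has_send(i) is set once port i's miss request wins the arbiter and is cleared when a new
  // request enters s2 (s1_real_fire), so each cache line is requested at most once per prefetch.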
  val has_send = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
  (0 until PortNumber).foreach { i =>
    when(s1_real_fire) {
      has_send(i) := false.B
    }.elsewhen(toMSHRArbiter.io.in(i).fire) {
      has_send(i) := true.B
    }
  }

  (0 until PortNumber).map { i =>
    toMSHRArbiter.io.in(i).valid         := s2_valid && s2_miss(i) && !has_send(i)
    toMSHRArbiter.io.in(i).bits.blkPaddr := getBlkAddr(s2_req_paddr(i))
    toMSHRArbiter.io.in(i).bits.vSetIdx  := s2_req_vSetIdx(i)
  }

  toMSHR <> toMSHRArbiter.io.out

  s2_flush := io.flush

  val s2_finish = (0 until PortNumber).map(i => has_send(i) || !s2_miss(i) || toMSHRArbiter.io.in(i).fire).reduce(_ && _)
  s2_ready := s2_finish || !s2_valid
  s2_fire  := s2_valid && s2_finish && !s2_flush

  /** PerfAccumulate */
  // the number of prefetch requests received from ftq
  XSPerfAccumulate("prefetch_req_receive", fromFtq.req.fire)
  // the number of prefetch requests sent to missUnit
  XSPerfAccumulate("prefetch_req_send", toMSHR.fire)
  XSPerfAccumulate("to_missUnit_stall", toMSHR.valid && !toMSHR.ready)
  /**
   * Count the number of requests that are filtered out for various reasons.
   * The number of prefetch discards counted by the performance accumulators may be
   * a little larger than the number actually discarded, because a canceled request
   * can have multiple reasons at the same time.
   */
  // discard prefetch request by flush
  // XSPerfAccumulate("fdip_prefetch_discard_by_tlb_except", p1_discard && p1_tlb_except)
  // // discard prefetch request by hit icache SRAM
  // XSPerfAccumulate("fdip_prefetch_discard_by_hit_cache", p2_discard && p1_meta_hit)
  // // discard prefetch request by hit write SRAM
  // XSPerfAccumulate("fdip_prefetch_discard_by_p1_monoitor", p1_discard && p1_monitor_hit)
  // // discard prefetch request by pmp except or mmio
  // XSPerfAccumulate("fdip_prefetch_discard_by_pmp", p2_discard && p2_pmp_except)
  // // discard prefetch request by hit mainPipe info
  // // XSPerfAccumulate("fdip_prefetch_discard_by_mainPipe", p2_discard && p2_mainPipe_hit)
}