/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.experimental.ExtModule
import chisel3.util._
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import utility._
import freechips.rocketchip.diplomacy.{IdRange, LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
import xiangshan.backend.fu.{PMP, PMPChecker, PMPReqBundle, PMPRespBundle}
import xiangshan.backend.fu.util.HasCSRConst
import difftest._

class L2TLB()(implicit p: Parameters) extends LazyModule with HasPtwConst {
  override def shouldBeInlined: Boolean = false

  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    clients = Seq(TLMasterParameters.v1(
      "ptw",
      sourceId = IdRange(0, MemReqWidth)
    )),
    requestFields = Seq(ReqSourceField())
  )))

  lazy val module = new L2TLBImp(this)
}

class L2TLBImp(outer: L2TLB)(implicit p: Parameters) extends PtwModule(outer) with HasCSRConst with HasPerfEvents {

  val (mem, edge) = outer.node.out.head

  val io = IO(new L2TLBIO)
  val difftestIO = IO(new Bundle() {
    val ptwResp = Output(Bool())
    val ptwAddr = Output(UInt(64.W))
    val ptwData = Output(Vec(4, UInt(64.W)))
  })

  /* Ptw processes multiple requests
   * Divide Ptw procedure into two stages: cache access; mem access if cache miss
   *          miss queue   itlb    dtlb
   *              |          |       |
   *              ------arbiter------
   *                       |
   *                l1 - l2 - l3 - sp
   *                       |
   *          -------------------------------------------
   *    miss  |  queue                       | hit
   *    [][][][][][]                         |
   *          |                              |
   *    state machine accessing mem          |
   *          |                              |
   *          ---------------arbiter---------------------
   *                 |                  |
   *                itlb              dtlb
   */

  difftestIO <> DontCare

  val sfence_tmp = DelayN(io.sfence, 1)
  val csr_tmp = DelayN(io.csr.tlb, 1)
  val sfence_dup = Seq.fill(9)(RegNext(sfence_tmp))
  val csr_dup = Seq.fill(8)(RegNext(csr_tmp))
  val satp = csr_dup(0).satp
  val vsatp = csr_dup(0).vsatp
  val hgatp = csr_dup(0).hgatp
  val priv = csr_dup(0).priv
  val flush = sfence_dup(0).valid || satp.changed || vsatp.changed || hgatp.changed

  val pmp = Module(new PMP())
  val pmp_check = VecInit(Seq.fill(3)(Module(new PMPChecker(lgMaxSize = 3, sameCycle = true)).io))
  pmp.io.distribute_csr := io.csr.distribute_csr
  pmp_check.foreach(_.check_env.apply(ModeS, pmp.io.pmp, pmp.io.pma))

  val missQueue = Module(new L2TlbMissQueue)
  val cache = Module(new PtwCache)
  val ptw = Module(new PTW)
  val hptw = Module(new HPTW)
  val llptw = Module(new LLPTW)
  val blockmq = Module(new BlockHelper(3))
  val arb1 = Module(new Arbiter(new PtwReq, PtwWidth))
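  // arb1 merges the PtwWidth tlb request ports; arb2 below then arbitrates, in
  // front of the page cache, among split requests from the ptw, miss-queue
  // replays, tlb misses, prefetch requests and hptw requests.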
  val arb2 = Module(new Arbiter(new Bundle {
    val vpn = UInt(vpnLen.W)
    val s2xlate = UInt(2.W)
    val source = UInt(bSourceWidth.W)
  }, (if (l2tlbParams.enablePrefetch) 4 else 3) + (if (HasHExtension) 1 else 0)))
  val hptw_req_arb = Module(new Arbiter(new Bundle {
    val id = UInt(log2Up(l2tlbParams.llptwsize).W)
    val source = UInt(bSourceWidth.W)
    val gvpn = UInt(vpnLen.W)
  }, 2))
  val hptw_resp_arb = Module(new Arbiter(new Bundle {
    val resp = new HptwResp()
    val id = UInt(log2Up(l2tlbParams.llptwsize).W)
  }, 2))
  val outArb = (0 until PtwWidth).map(i => Module(new Arbiter(new Bundle {
    val s2xlate = UInt(2.W)
    val s1 = new PtwSectorResp()
    val s2 = new HptwResp()
  }, 1)).io)
  val mergeArb = (0 until PtwWidth).map(i => Module(new Arbiter(new Bundle {
    val s2xlate = UInt(2.W)
    val s1 = new PtwMergeResp()
    val s2 = new HptwResp()
  }, 3)).io)
  val outArbCachePort = 0
  val outArbFsmPort = 1
  val outArbMqPort = 2

  // hptw arb input port
  val InHptwArbPTWPort = 0
  val InHptwArbLLPTWPort = 1
  hptw_req_arb.io.in(InHptwArbPTWPort).valid := ptw.io.hptw.req.valid
  hptw_req_arb.io.in(InHptwArbPTWPort).bits.gvpn := ptw.io.hptw.req.bits.gvpn
  hptw_req_arb.io.in(InHptwArbPTWPort).bits.id := ptw.io.hptw.req.bits.id
  hptw_req_arb.io.in(InHptwArbPTWPort).bits.source := ptw.io.hptw.req.bits.source
  ptw.io.hptw.req.ready := hptw_req_arb.io.in(InHptwArbPTWPort).ready

  hptw_req_arb.io.in(InHptwArbLLPTWPort).valid := llptw.io.hptw.req.valid
  hptw_req_arb.io.in(InHptwArbLLPTWPort).bits.gvpn := llptw.io.hptw.req.bits.gvpn
  hptw_req_arb.io.in(InHptwArbLLPTWPort).bits.id := llptw.io.hptw.req.bits.id
  hptw_req_arb.io.in(InHptwArbLLPTWPort).bits.source := llptw.io.hptw.req.bits.source
  llptw.io.hptw.req.ready := hptw_req_arb.io.in(InHptwArbLLPTWPort).ready

  val hptw_id = RegInit(0.U(log2Up(l2tlbParams.llptwsize).W))
  when (hptw_req_arb.io.out.valid) {
    hptw_id := hptw_req_arb.io.out.bits.id
  }
  // arb2 input port
  val InArbPTWPort = 0
  val InArbMissQueuePort = 1
  val InArbTlbPort = 2
  val InArbPrefetchPort = 3
  val InArbHPTWPort = 4
  // NOTE: when the cache response misses but the ptw does not accept the request,
  // the request is sent to the miss queue
  arb1.io.in <> VecInit(io.tlb.map(_.req(0)))

  arb2.io.in(InArbPTWPort).valid := ptw.io.llptw.valid
  arb2.io.in(InArbPTWPort).bits.vpn := ptw.io.llptw.bits.req_info.vpn
  arb2.io.in(InArbPTWPort).bits.s2xlate := ptw.io.llptw.bits.req_info.s2xlate
  arb2.io.in(InArbPTWPort).bits.source := ptw.io.llptw.bits.req_info.source
  ptw.io.llptw.ready := arb2.io.in(InArbPTWPort).ready
  block_decoupled(missQueue.io.out, arb2.io.in(InArbMissQueuePort), !ptw.io.req.ready)

  arb2.io.in(InArbTlbPort).valid := arb1.io.out.valid
  arb2.io.in(InArbTlbPort).bits.vpn := arb1.io.out.bits.vpn
  arb2.io.in(InArbTlbPort).bits.s2xlate := arb1.io.out.bits.s2xlate
  arb2.io.in(InArbTlbPort).bits.source := arb1.io.chosen
  arb1.io.out.ready := arb2.io.in(InArbTlbPort).ready

  arb2.io.in(InArbHPTWPort).valid := hptw_req_arb.io.out.valid
  arb2.io.in(InArbHPTWPort).bits.vpn := hptw_req_arb.io.out.bits.gvpn
  arb2.io.in(InArbHPTWPort).bits.s2xlate := onlyStage2
  arb2.io.in(InArbHPTWPort).bits.source := hptw_req_arb.io.out.bits.source
  hptw_req_arb.io.out.ready := arb2.io.in(InArbHPTWPort).ready
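
  // chisel3.util.Arbiter grants lower-indexed ports first, so arb2 serves ptw
  // split requests before miss-queue replays, then tlb misses, then prefetch
  // and hptw requests.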
  if (l2tlbParams.enablePrefetch) {
    val prefetch = Module(new L2TlbPrefetch())
    val recv = cache.io.resp
    // NOTE: 1. a prefetch req does not generate another prefetch
    //       2. a req from the miss queue does not generate a prefetch
    // NOTE: 1. a miss req generates a prefetch
    //       2. a hit on a prefetched entry generates a prefetch
    prefetch.io.in.valid := recv.fire && !from_pre(recv.bits.req_info.source) && (!recv.bits.hit ||
      recv.bits.prefetch) && recv.bits.isFirst
    prefetch.io.in.bits.vpn := recv.bits.req_info.vpn
    prefetch.io.sfence := sfence_dup(0)
    prefetch.io.csr := csr_dup(0)
    arb2.io.in(InArbPrefetchPort) <> prefetch.io.out

    val isWriteL2TlbPrefetchTable = WireInit(Constantin.createRecord("isWriteL2TlbPrefetchTable" + p(XSCoreParamsKey).HartId.toString))
    val L2TlbPrefetchTable = ChiselDB.createTable("L2TlbPrefetch_hart" + p(XSCoreParamsKey).HartId.toString, new L2TlbPrefetchDB)
    val L2TlbPrefetchDB = Wire(new L2TlbPrefetchDB)
    L2TlbPrefetchDB.vpn := prefetch.io.out.bits.vpn
    L2TlbPrefetchTable.log(L2TlbPrefetchDB, isWriteL2TlbPrefetchTable.orR && prefetch.io.out.fire, "L2TlbPrefetch", clock, reset)
  }
  arb2.io.out.ready := cache.io.req.ready

  val mq_arb = Module(new Arbiter(new L2TlbInnerBundle, 2))
  mq_arb.io.in(0).valid := cache.io.resp.valid && !cache.io.resp.bits.hit &&
    (!cache.io.resp.bits.toFsm.l2Hit || cache.io.resp.bits.bypassed) &&
    !from_pre(cache.io.resp.bits.req_info.source) &&
    (cache.io.resp.bits.bypassed || cache.io.resp.bits.isFirst || !ptw.io.req.ready)
  mq_arb.io.in(0).bits := cache.io.resp.bits.req_info
  mq_arb.io.in(1) <> llptw.io.cache
  missQueue.io.in <> mq_arb.io.out
  missQueue.io.sfence := sfence_dup(6)
  missQueue.io.csr := csr_dup(5)

  blockmq.io.start := missQueue.io.out.fire
  blockmq.io.enable := ptw.io.req.fire

  llptw.io.in.valid := cache.io.resp.valid &&
    !cache.io.resp.bits.hit &&
    cache.io.resp.bits.toFsm.l2Hit &&
    !cache.io.resp.bits.bypassed &&
    !cache.io.resp.bits.isHptw
  llptw.io.in.bits.req_info := cache.io.resp.bits.req_info
  llptw.io.in.bits.ppn := cache.io.resp.bits.toFsm.ppn
  llptw.io.sfence := sfence_dup(1)
  llptw.io.csr := csr_dup(1)

  cache.io.req.valid := arb2.io.out.valid
  cache.io.req.bits.req_info.vpn := arb2.io.out.bits.vpn
  cache.io.req.bits.req_info.s2xlate := arb2.io.out.bits.s2xlate
  cache.io.req.bits.req_info.source := arb2.io.out.bits.source
  cache.io.req.bits.isFirst := arb2.io.chosen =/= InArbMissQueuePort.U
  cache.io.req.bits.isHptw := arb2.io.chosen === InArbHPTWPort.U
  cache.io.req.bits.hptwId := hptw_id
  cache.io.req.bits.bypassed.map(_ := false.B)
  cache.io.sfence := sfence_dup(2)
  cache.io.csr := csr_dup(2)
  cache.io.sfence_dup.zip(sfence_dup.drop(2).take(4)).map(s => s._1 := s._2)
  cache.io.csr_dup.zip(csr_dup.drop(2).take(3)).map(c => c._1 := c._2)
  cache.io.resp.ready := Mux(cache.io.resp.bits.hit,
    outReady(cache.io.resp.bits.req_info.source, outArbCachePort),
    Mux(cache.io.resp.bits.toFsm.l2Hit && !cache.io.resp.bits.bypassed, llptw.io.in.ready,
      Mux(cache.io.resp.bits.bypassed || cache.io.resp.bits.isFirst, mq_arb.io.in(0).ready, mq_arb.io.in(0).ready || ptw.io.req.ready)))

  // NOTE: missQueue req has higher priority
  ptw.io.req.valid := cache.io.resp.valid && !cache.io.resp.bits.hit && !cache.io.resp.bits.toFsm.l2Hit &&
    !cache.io.resp.bits.bypassed &&
    !cache.io.resp.bits.isFirst &&
    !cache.io.resp.bits.isHptw
  ptw.io.req.bits.req_info := cache.io.resp.bits.req_info
  ptw.io.req.bits.l1Hit := cache.io.resp.bits.toFsm.l1Hit
  ptw.io.req.bits.ppn := cache.io.resp.bits.toFsm.ppn
  ptw.io.sfence := sfence_dup(7)
  ptw.io.csr := csr_dup(6)
  ptw.io.resp.ready := outReady(ptw.io.resp.bits.source, outArbFsmPort)

  hptw.io.req.valid := cache.io.resp.valid && !cache.io.resp.bits.hit && !cache.io.resp.bits.bypassed && cache.io.resp.bits.isHptw
  hptw.io.req.bits.gvpn := cache.io.resp.bits.req_info.vpn
  hptw.io.req.bits.id := cache.io.resp.bits.toHptw.id
  hptw.io.req.bits.source := cache.io.resp.bits.req_info.source
  hptw.io.req.bits.l1Hit := cache.io.resp.bits.toHptw.l1Hit
  hptw.io.req.bits.l2Hit := cache.io.resp.bits.toHptw.l2Hit
  hptw.io.sfence := sfence_dup(8)
  hptw.io.csr := csr_dup(7)
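
  // A worked example of the mem-request source-id layout defined by from_llptw /
  // from_ptw / from_hptw below, assuming l2tlbParams.llptwsize == 16 (so that
  // MemReqWidth == 18):
  //   ids 0..15 -> llptw entries, id 16 -> the ptw fsm, id 17 -> the hptw fsm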
  // mem req
  def blockBytes_align(addr: UInt) = {
    Cat(addr(PAddrBits - 1, log2Up(l2tlbParams.blockBytes)), 0.U(log2Up(l2tlbParams.blockBytes).W))
  }
  def addr_low_from_vpn(vpn: UInt) = {
    vpn(log2Ceil(l2tlbParams.blockBytes)-log2Ceil(XLEN/8)-1, 0)
  }
  def addr_low_from_paddr(paddr: UInt) = {
    paddr(log2Up(l2tlbParams.blockBytes)-1, log2Up(XLEN/8))
  }
  def from_llptw(id: UInt) = {
    id < l2tlbParams.llptwsize.U
  }
  def from_ptw(id: UInt) = {
    id === l2tlbParams.llptwsize.U
  }
  def from_hptw(id: UInt) = {
    id === l2tlbParams.llptwsize.U + 1.U
  }
  val waiting_resp = RegInit(VecInit(Seq.fill(MemReqWidth)(false.B)))
  val flush_latch = RegInit(VecInit(Seq.fill(MemReqWidth)(false.B)))
  for (i <- waiting_resp.indices) {
    assert(!flush_latch(i) || waiting_resp(i)) // when flush_latch waits for a mem resp, waiting_resp should be true
  }

  val llptw_out = llptw.io.out
  val llptw_mem = llptw.io.mem
  llptw_mem.req_mask := waiting_resp.take(l2tlbParams.llptwsize)
  ptw.io.mem.mask := waiting_resp.apply(l2tlbParams.llptwsize)
  hptw.io.mem.mask := waiting_resp.apply(l2tlbParams.llptwsize + 1)

  val mem_arb = Module(new Arbiter(new L2TlbMemReqBundle(), 3))
  mem_arb.io.in(0) <> ptw.io.mem.req
  mem_arb.io.in(1) <> llptw_mem.req
  mem_arb.io.in(2) <> hptw.io.mem.req
  mem_arb.io.out.ready := mem.a.ready && !flush

  // assert: we should not send two mem accesses to the same addr in a row
  val last_resp_vpn = RegEnable(cache.io.refill.bits.req_info_dup(0).vpn, cache.io.refill.valid)
  val last_resp_level = RegEnable(cache.io.refill.bits.level_dup(0), cache.io.refill.valid)
  val last_resp_v = RegInit(false.B)
  val last_has_invalid = !Cat(cache.io.refill.bits.ptes.asTypeOf(Vec(blockBits/XLEN, UInt(XLEN.W))).map(a => a(0))).andR || cache.io.refill.bits.sel_pte_dup(0).asTypeOf(new PteBundle).isAf()
  when (cache.io.refill.valid) { last_resp_v := !last_has_invalid }
  when (flush) { last_resp_v := false.B }
  XSError(last_resp_v && cache.io.refill.valid &&
    (cache.io.refill.bits.req_info_dup(0).vpn === last_resp_vpn) &&
    (cache.io.refill.bits.level_dup(0) === last_resp_level),
    "l2tlb should not access mem at the same addr twice")
  // ATTENTION: this may assert wrongly when the refilled ptes are at level 2 and
  // the last part is valid but the current part is invalid, so one more mem
  // access to the same addr happens. If that happens, remove the assert.
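
  // req_addr_low records, for each in-flight mem request, which XLEN-wide word
  // inside the refilled block the request actually needs; it later selects the
  // pte out of refill_data when the response returns.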
  val req_addr_low = Reg(Vec(MemReqWidth, UInt((log2Up(l2tlbParams.blockBytes)-log2Up(XLEN/8)).W)))

  when (llptw.io.in.fire) {
    // when enqueuing the miss queue, set req_addr_low to select the mem resp data part later
    req_addr_low(llptw_mem.enq_ptr) := addr_low_from_vpn(llptw.io.in.bits.req_info.vpn)
  }
  when (mem_arb.io.out.fire) {
    req_addr_low(mem_arb.io.out.bits.id) := addr_low_from_paddr(mem_arb.io.out.bits.addr)
    waiting_resp(mem_arb.io.out.bits.id) := true.B
  }
  // mem read
  val memRead = edge.Get(
    fromSource = mem_arb.io.out.bits.id,
    // toAddress = memAddr(log2Up(CacheLineSize / 2 / 8) - 1, 0),
    toAddress = blockBytes_align(mem_arb.io.out.bits.addr),
    lgSize = log2Up(l2tlbParams.blockBytes).U
  )._2
  mem.a.bits := memRead
  mem.a.valid := mem_arb.io.out.valid && !flush
  mem.a.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.PTW.id.U)
  mem.d.ready := true.B
  // mem -> data buffer
  val refill_data = Reg(Vec(blockBits / l1BusDataWidth, UInt(l1BusDataWidth.W)))
  val refill_helper = edge.firstlastHelper(mem.d.bits, mem.d.fire)
  val mem_resp_done = refill_helper._3
  val mem_resp_from_llptw = from_llptw(mem.d.bits.source)
  val mem_resp_from_ptw = from_ptw(mem.d.bits.source)
  val mem_resp_from_hptw = from_hptw(mem.d.bits.source)
  when (mem.d.valid) {
    assert(mem.d.bits.source < MemReqWidth.U)
    refill_data(refill_helper._4) := mem.d.bits.data
  }
  // refill_data_tmp is the wire fork of refill_data, but one cycle earlier
  val refill_data_tmp = WireInit(refill_data)
  refill_data_tmp(refill_helper._4) := mem.d.bits.data

  // save only one pte for each id
  // (the miss queue may not respond to the tlb with low latency; it should have
  //  the highest priority, but that is difficult to design in the cache)
  val resp_pte = VecInit((0 until MemReqWidth).map(i =>
    if (i == l2tlbParams.llptwsize + 1) { RegEnable(get_part(refill_data_tmp, req_addr_low(i)), mem_resp_done && mem_resp_from_hptw) }
    else if (i == l2tlbParams.llptwsize) { RegEnable(get_part(refill_data_tmp, req_addr_low(i)), mem_resp_done && mem_resp_from_ptw) }
    else { DataHoldBypass(get_part(refill_data, req_addr_low(i)), llptw_mem.buffer_it(i)) }
    // llptw cannot use refill_data_tmp, because the enq bypass's result takes effect on the next cycle
  ))

  // save eight ptes for each id when sector tlb
  // (the miss queue may not respond to the tlb with low latency; it should have
  //  the highest priority, but that is difficult to design in the cache)
  val resp_pte_sector = VecInit((0 until MemReqWidth).map(i =>
    if (i == l2tlbParams.llptwsize + 1) { RegEnable(refill_data_tmp, mem_resp_done && mem_resp_from_hptw) }
    else if (i == l2tlbParams.llptwsize) { RegEnable(refill_data_tmp, mem_resp_done && mem_resp_from_ptw) }
    else { DataHoldBypass(refill_data, llptw_mem.buffer_it(i)) }
    // llptw cannot use refill_data_tmp, because the enq bypass's result takes effect on the next cycle
  ))

  // mem -> llptw
  llptw_mem.resp.valid := mem_resp_done && mem_resp_from_llptw
  llptw_mem.resp.bits.id := DataHoldBypass(mem.d.bits.source, mem.d.valid)
  // mem -> ptw
  ptw.io.mem.req.ready := mem.a.ready
  ptw.io.mem.resp.valid := mem_resp_done && mem_resp_from_ptw
  ptw.io.mem.resp.bits := resp_pte.apply(l2tlbParams.llptwsize)
  // mem -> hptw
  hptw.io.mem.req.ready := mem.a.ready
  hptw.io.mem.resp.valid := mem_resp_done && mem_resp_from_hptw
  hptw.io.mem.resp.bits := resp_pte.apply(l2tlbParams.llptwsize + 1)
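
  // For the cache refill below: llptw refills are always leaf (level 2) ptes,
  // while ptw/hptw refills reuse the level that was latched when their mem
  // request fired.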
  // mem -> cache
  val refill_from_llptw = mem_resp_from_llptw
  val refill_from_ptw = mem_resp_from_ptw
  val refill_from_hptw = mem_resp_from_hptw
  val refill_level = Mux(refill_from_llptw, 2.U, Mux(refill_from_ptw, RegEnable(ptw.io.refill.level, init = 0.U, ptw.io.mem.req.fire), RegEnable(hptw.io.refill.level, init = 0.U, hptw.io.mem.req.fire)))
  val refill_valid = mem_resp_done && !flush && !flush_latch(mem.d.bits.source)

  cache.io.refill.valid := RegNext(refill_valid, false.B)
  cache.io.refill.bits.ptes := refill_data.asUInt
  cache.io.refill.bits.req_info_dup.map(_ := RegEnable(Mux(refill_from_llptw, llptw_mem.refill, Mux(refill_from_ptw, ptw.io.refill.req_info, hptw.io.refill.req_info)), refill_valid))
  cache.io.refill.bits.level_dup.map(_ := RegEnable(refill_level, refill_valid))
  cache.io.refill.bits.levelOH(refill_level, refill_valid)
  cache.io.refill.bits.sel_pte_dup.map(_ := RegNext(sel_data(refill_data_tmp.asUInt, req_addr_low(mem.d.bits.source))))

  if (env.EnableDifftest) {
    val difftest_ptw_addr = RegInit(VecInit(Seq.fill(MemReqWidth)(0.U(PAddrBits.W))))
    when (mem.a.valid) {
      difftest_ptw_addr(mem.a.bits.source) := mem.a.bits.address
    }

    val difftest = DifftestModule(new DiffRefillEvent, dontCare = true)
    difftest.coreid := io.hartId
    difftest.index := 2.U
    difftest.valid := cache.io.refill.valid
    difftest.addr := difftest_ptw_addr(RegNext(mem.d.bits.source))
    difftest.data := refill_data.asTypeOf(difftest.data)
    difftest.idtfr := DontCare
  }

  if (env.EnableDifftest) {
    for (i <- 0 until PtwWidth) {
      val difftest = DifftestModule(new DiffL2TLBEvent)
      difftest.clock := clock
      difftest.coreid := p(XSCoreParamsKey).HartId.asUInt
      difftest.valid := io.tlb(i).resp.fire && !io.tlb(i).resp.bits.s1.af && !io.tlb(i).resp.bits.s2.gaf
      difftest.index := i.U
      difftest.vpn := Cat(io.tlb(i).resp.bits.s1.entry.tag, 0.U(sectortlbwidth.W))
      for (j <- 0 until tlbcontiguous) {
        difftest.ppn(j) := Cat(io.tlb(i).resp.bits.s1.entry.ppn, io.tlb(i).resp.bits.s1.ppn_low(j))
        difftest.valididx(j) := io.tlb(i).resp.bits.s1.valididx(j)
        difftest.pteidx(j) := io.tlb(i).resp.bits.s1.pteidx(j)
      }
      difftest.perm := io.tlb(i).resp.bits.s1.entry.perm.getOrElse(0.U.asTypeOf(new PtePermBundle)).asUInt
      difftest.level := io.tlb(i).resp.bits.s1.entry.level.getOrElse(0.U.asUInt)
      difftest.pf := io.tlb(i).resp.bits.s1.pf
      difftest.satp := Cat(io.csr.tlb.satp.mode, io.csr.tlb.satp.asid, io.csr.tlb.satp.ppn)
      difftest.vsatp := Cat(io.csr.tlb.vsatp.mode, io.csr.tlb.vsatp.asid, io.csr.tlb.vsatp.ppn)
      difftest.hgatp := Cat(io.csr.tlb.hgatp.mode, io.csr.tlb.hgatp.asid, io.csr.tlb.hgatp.ppn)
      difftest.gvpn := io.tlb(i).resp.bits.s2.entry.tag
      difftest.g_perm := io.tlb(i).resp.bits.s2.entry.perm.getOrElse(0.U.asTypeOf(new PtePermBundle)).asUInt
      difftest.g_level := io.tlb(i).resp.bits.s2.entry.level.getOrElse(0.U.asUInt)
      difftest.s2ppn := io.tlb(i).resp.bits.s2.entry.ppn
      difftest.gpf := io.tlb(i).resp.bits.s2.gpf
      difftest.s2xlate := io.tlb(i).resp.bits.s2xlate
    }
  }

  // pmp
  pmp_check(0).req <> ptw.io.pmp.req
  ptw.io.pmp.resp <> pmp_check(0).resp
  pmp_check(1).req <> llptw.io.pmp.req
  llptw.io.pmp.resp <> pmp_check(1).resp
  pmp_check(2).req <> hptw.io.pmp.req
  hptw.io.pmp.resp <> pmp_check(2).resp

  llptw_out.ready := outReady(llptw_out.bits.req_info.source, outArbMqPort)

  // hptw and page cache -> ptw and llptw
  val HptwRespArbCachePort = 0
  val HptwRespArbHptw = 1
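
  // hptw responses are routed by id: an id equal to FsmReqID belongs to the ptw
  // fsm, while any other id indexes an llptw entry.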
  hptw_resp_arb.io.in(HptwRespArbCachePort).valid := cache.io.resp.valid && cache.io.resp.bits.hit && cache.io.resp.bits.isHptw
  hptw_resp_arb.io.in(HptwRespArbCachePort).bits.id := cache.io.resp.bits.toHptw.id
  hptw_resp_arb.io.in(HptwRespArbCachePort).bits.resp := cache.io.resp.bits.toHptw.resp
  hptw_resp_arb.io.in(HptwRespArbHptw).valid := hptw.io.resp.valid
  hptw_resp_arb.io.in(HptwRespArbHptw).bits.id := hptw.io.resp.bits.id
  hptw_resp_arb.io.in(HptwRespArbHptw).bits.resp := hptw.io.resp.bits.resp

  ptw.io.hptw.resp.valid := hptw_resp_arb.io.out.valid && hptw_resp_arb.io.out.bits.id === FsmReqID.U
  ptw.io.hptw.resp.bits.h_resp := hptw_resp_arb.io.out.bits.resp
  llptw.io.hptw.resp.valid := hptw_resp_arb.io.out.valid && hptw_resp_arb.io.out.bits.id =/= FsmReqID.U
  llptw.io.hptw.resp.bits.id := hptw_resp_arb.io.out.bits.id
  llptw.io.hptw.resp.bits.h_resp := hptw_resp_arb.io.out.bits.resp
  hptw_resp_arb.io.out.ready := true.B

  // Timing: maybe we need some optimization here, or even one more cycle
  for (i <- 0 until PtwWidth) {
    mergeArb(i).in(outArbCachePort).valid := cache.io.resp.valid && cache.io.resp.bits.hit && cache.io.resp.bits.req_info.source === i.U
    mergeArb(i).in(outArbCachePort).bits.s2xlate := cache.io.resp.bits.req_info.s2xlate
    mergeArb(i).in(outArbCachePort).bits.s1 := cache.io.resp.bits.toTlb
    mergeArb(i).in(outArbCachePort).bits.s2 := cache.io.resp.bits.toHptw.resp
    mergeArb(i).in(outArbFsmPort).valid := ptw.io.resp.valid && ptw.io.resp.bits.source === i.U
    mergeArb(i).in(outArbFsmPort).bits.s2xlate := ptw.io.resp.bits.s2xlate
    mergeArb(i).in(outArbFsmPort).bits.s1 := ptw.io.resp.bits.resp
    mergeArb(i).in(outArbFsmPort).bits.s2 := ptw.io.resp.bits.h_resp
    mergeArb(i).in(outArbMqPort).valid := llptw_out.valid && llptw_out.bits.req_info.source === i.U
    mergeArb(i).in(outArbMqPort).bits.s2xlate := llptw_out.bits.req_info.s2xlate
    mergeArb(i).in(outArbMqPort).bits.s1 := contiguous_pte_to_merge_ptwResp(resp_pte_sector(llptw_out.bits.id).asUInt, llptw_out.bits.req_info.vpn, llptw_out.bits.af, true, s2xlate = llptw_out.bits.req_info.s2xlate)
    mergeArb(i).in(outArbMqPort).bits.s2 := llptw_out.bits.h_resp
    mergeArb(i).out.ready := outArb(i).in(0).ready
  }

  for (i <- 0 until PtwWidth) {
    outArb(i).in(0).valid := mergeArb(i).out.valid
    outArb(i).in(0).bits.s2xlate := mergeArb(i).out.bits.s2xlate
    outArb(i).in(0).bits.s1 := merge_ptwResp_to_sector_ptwResp(mergeArb(i).out.bits.s1)
    outArb(i).in(0).bits.s2 := mergeArb(i).out.bits.s2
  }

  // io.tlb.map(_.resp) <> outArb.map(_.out)
  io.tlb.map(_.resp).zip(outArb.map(_.out)).map {
    case (resp, out) => resp <> out
  }

  // sfence
  when (flush) {
    for (i <- 0 until MemReqWidth) {
      when (waiting_resp(i)) {
        flush_latch(i) := true.B
      }
    }
  }
  // mem -> control signal
  // waiting_resp and flush_latch will be reset when mem_resp_done
  when (mem_resp_done) {
    waiting_resp(mem.d.bits.source) := false.B
    flush_latch(mem.d.bits.source) := false.B
  }

  def block_decoupled[T <: Data](source: DecoupledIO[T], sink: DecoupledIO[T], block_signal: Bool) = {
    sink.valid := source.valid && !block_signal
    source.ready := sink.ready && !block_signal
    sink.bits := source.bits
  }

  def get_part(data: Vec[UInt], index: UInt): UInt = {
    val inner_data = data.asTypeOf(Vec(data.getWidth / XLEN, UInt(XLEN.W)))
    inner_data(index)
  }
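
  // A worked example of get_part, assuming l2tlbParams.blockBytes == 64 and
  // XLEN == 64: the 512-bit refill block is viewed as Vec(8, UInt(64.W)), so
  // get_part(refill_data, 5.U) returns bits 383..320 of the block.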
  // not_super means that this is a normal (non-super) page
  // valididx(i) will be all true for a super page, for the convenience of l1 tlb matching
  def contiguous_pte_to_merge_ptwResp(pte: UInt, vpn: UInt, af: Bool, af_first: Boolean, not_super: Boolean = true, s2xlate: UInt): PtwMergeResp = {
    assert(tlbcontiguous == 8, "Only support tlbcontiguous = 8!")
    val ptw_merge_resp = Wire(new PtwMergeResp())
    val hasS2xlate = s2xlate =/= noS2xlate
    for (i <- 0 until tlbcontiguous) {
      val pte_in = pte(64 * i + 63, 64 * i).asTypeOf(new PteBundle())
      val ptw_resp = Wire(new PtwMergeEntry(tagLen = sectorvpnLen, hasPerm = true, hasLevel = true))
      ptw_resp.ppn := pte_in.ppn(ppnLen - 1, sectortlbwidth)
      ptw_resp.ppn_low := pte_in.ppn(sectortlbwidth - 1, 0)
      ptw_resp.level.map(_ := 2.U)
      ptw_resp.perm.map(_ := pte_in.getPerm())
      ptw_resp.tag := vpn(vpnLen - 1, sectortlbwidth)
      ptw_resp.pf := (if (af_first) !af else true.B) && pte_in.isPf(2.U)
      ptw_resp.af := (if (!af_first) pte_in.isPf(2.U) else true.B) && (af || pte_in.isAf())
      ptw_resp.v := !ptw_resp.pf
      ptw_resp.prefetch := DontCare
      ptw_resp.asid := Mux(hasS2xlate, vsatp.asid, satp.asid)
      ptw_resp.vmid.map(_ := hgatp.asid)
      ptw_merge_resp.entry(i) := ptw_resp
    }
    ptw_merge_resp.pteidx := UIntToOH(vpn(sectortlbwidth - 1, 0)).asBools
    ptw_merge_resp.not_super := not_super.B
    ptw_merge_resp
  }

  def merge_ptwResp_to_sector_ptwResp(pte: PtwMergeResp): PtwSectorResp = {
    assert(tlbcontiguous == 8, "Only support tlbcontiguous = 8!")
    val ptw_sector_resp = Wire(new PtwSectorResp)
    ptw_sector_resp.entry.tag := pte.entry(OHToUInt(pte.pteidx)).tag
    ptw_sector_resp.entry.asid := pte.entry(OHToUInt(pte.pteidx)).asid
    ptw_sector_resp.entry.vmid.map(_ := pte.entry(OHToUInt(pte.pteidx)).vmid.getOrElse(0.U))
    ptw_sector_resp.entry.ppn := pte.entry(OHToUInt(pte.pteidx)).ppn
    ptw_sector_resp.entry.perm.map(_ := pte.entry(OHToUInt(pte.pteidx)).perm.getOrElse(0.U.asTypeOf(new PtePermBundle)))
    ptw_sector_resp.entry.level.map(_ := pte.entry(OHToUInt(pte.pteidx)).level.getOrElse(0.U(2.W)))
    ptw_sector_resp.entry.prefetch := pte.entry(OHToUInt(pte.pteidx)).prefetch
    ptw_sector_resp.entry.v := pte.entry(OHToUInt(pte.pteidx)).v
    ptw_sector_resp.af := pte.entry(OHToUInt(pte.pteidx)).af
    ptw_sector_resp.pf := pte.entry(OHToUInt(pte.pteidx)).pf
    ptw_sector_resp.addr_low := OHToUInt(pte.pteidx)
    ptw_sector_resp.pteidx := pte.pteidx
    for (i <- 0 until tlbcontiguous) {
      val ppn_equal = pte.entry(i).ppn === pte.entry(OHToUInt(pte.pteidx)).ppn
      val perm_equal = pte.entry(i).perm.getOrElse(0.U.asTypeOf(new PtePermBundle)).asUInt === pte.entry(OHToUInt(pte.pteidx)).perm.getOrElse(0.U.asTypeOf(new PtePermBundle)).asUInt
      val v_equal = pte.entry(i).v === pte.entry(OHToUInt(pte.pteidx)).v
      val af_equal = pte.entry(i).af === pte.entry(OHToUInt(pte.pteidx)).af
      val pf_equal = pte.entry(i).pf === pte.entry(OHToUInt(pte.pteidx)).pf
      ptw_sector_resp.valididx(i) := (ppn_equal && perm_equal && v_equal && af_equal && pf_equal) || !pte.not_super
      ptw_sector_resp.ppn_low(i) := pte.entry(i).ppn_low
    }
    ptw_sector_resp.valididx(OHToUInt(pte.pteidx)) := true.B
    ptw_sector_resp
  }
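
  // With tlbcontiguous == 8, merge_ptwResp_to_sector_ptwResp above compresses
  // eight adjacent ptes into one sector entry: the pte selected by pteidx
  // provides tag/asid/ppn/perm, and valididx(i) marks which of the eight share
  // the same ppn/perm/v/af/pf and are therefore covered by this sector entry.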

  def outReady(source: UInt, port: Int): Bool = {
    MuxLookup(source, true.B)((0 until PtwWidth).map(i => i.U -> mergeArb(i).in(port).ready))
  }

  // debug info
  for (i <- 0 until PtwWidth) {
    XSDebug(p"[io.tlb(${i.U})] ${io.tlb(i)}\n")
  }
  XSDebug(p"[sfence] ${io.sfence}\n")
  XSDebug(p"[io.csr.tlb] ${io.csr.tlb}\n")

  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"req_count${i}", io.tlb(i).req(0).fire)
    XSPerfAccumulate(s"req_blocked_count_${i}", io.tlb(i).req(0).valid && !io.tlb(i).req(0).ready)
  }
  XSPerfAccumulate(s"req_blocked_by_mq", arb1.io.out.valid && missQueue.io.out.valid)
  for (i <- 0 until (MemReqWidth + 1)) {
    XSPerfAccumulate(s"mem_req_util${i}", PopCount(waiting_resp) === i.U)
  }
  XSPerfAccumulate("mem_cycle", PopCount(waiting_resp) =/= 0.U)
  XSPerfAccumulate("mem_count", mem.a.fire)
  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"llptw_ppn_af${i}", mergeArb(i).in(outArbMqPort).valid && mergeArb(i).in(outArbMqPort).bits.s1.entry(OHToUInt(mergeArb(i).in(outArbMqPort).bits.s1.pteidx)).af && !llptw_out.bits.af)
    XSPerfAccumulate(s"access_fault${i}", io.tlb(i).resp.fire && io.tlb(i).resp.bits.s1.af)
  }

  // print configs
  println(s"${l2tlbParams.name}: a ptw, a llptw with size ${l2tlbParams.llptwsize}, miss queue size ${MissQueueSize} l1:${l2tlbParams.l1Size} fa l2: nSets ${l2tlbParams.l2nSets} nWays ${l2tlbParams.l2nWays} l3: nSets ${l2tlbParams.l3nSets} nWays ${l2tlbParams.l3nWays} blockBytes:${l2tlbParams.blockBytes}")

  // time out assert
  for (i <- 0 until MemReqWidth) {
    TimeOutAssert(waiting_resp(i), timeOutThreshold, s"ptw mem resp time out wait_resp${i}")
    TimeOutAssert(flush_latch(i), timeOutThreshold, s"ptw mem resp time out flush_latch${i}")
  }

  val perfEvents = Seq(llptw, cache, ptw).flatMap(_.getPerfEvents)
  generatePerfEvent()

  val isWriteL1TlbTable = WireInit(Constantin.createRecord("isWriteL1TlbTable" + p(XSCoreParamsKey).HartId.toString))
  val L1TlbTable = ChiselDB.createTable("L1Tlb_hart" + p(XSCoreParamsKey).HartId.toString, new L1TlbDB)
  val ITlbReqDB, DTlbReqDB, ITlbRespDB, DTlbRespDB = Wire(new L1TlbDB)
  ITlbReqDB.vpn := io.tlb(0).req(0).bits.vpn
  DTlbReqDB.vpn := io.tlb(1).req(0).bits.vpn
  ITlbRespDB.vpn := io.tlb(0).resp.bits.s1.entry.tag
  DTlbRespDB.vpn := io.tlb(1).resp.bits.s1.entry.tag
  L1TlbTable.log(ITlbReqDB, isWriteL1TlbTable.orR && io.tlb(0).req(0).fire, "ITlbReq", clock, reset)
  L1TlbTable.log(DTlbReqDB, isWriteL1TlbTable.orR && io.tlb(1).req(0).fire, "DTlbReq", clock, reset)
  L1TlbTable.log(ITlbRespDB, isWriteL1TlbTable.orR && io.tlb(0).resp.fire, "ITlbResp", clock, reset)
  L1TlbTable.log(DTlbRespDB, isWriteL1TlbTable.orR && io.tlb(1).resp.fire, "DTlbResp", clock, reset)

  val isWritePageCacheTable = WireInit(Constantin.createRecord("isWritePageCacheTable" + p(XSCoreParamsKey).HartId.toString))
  val PageCacheTable = ChiselDB.createTable("PageCache_hart" + p(XSCoreParamsKey).HartId.toString, new PageCacheDB)
  val PageCacheDB = Wire(new PageCacheDB)
  PageCacheDB.vpn := Cat(cache.io.resp.bits.toTlb.entry(0).tag, OHToUInt(cache.io.resp.bits.toTlb.pteidx))
  PageCacheDB.source := cache.io.resp.bits.req_info.source
  PageCacheDB.bypassed := cache.io.resp.bits.bypassed
  PageCacheDB.is_first := cache.io.resp.bits.isFirst
  PageCacheDB.prefetched := cache.io.resp.bits.toTlb.entry(0).prefetch
  PageCacheDB.prefetch := cache.io.resp.bits.prefetch
  PageCacheDB.l2Hit := cache.io.resp.bits.toFsm.l2Hit
  PageCacheDB.l1Hit := cache.io.resp.bits.toFsm.l1Hit
  PageCacheDB.hit := cache.io.resp.bits.hit
  PageCacheTable.log(PageCacheDB, isWritePageCacheTable.orR && cache.io.resp.fire, "PageCache", clock, reset)
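
  // The ChiselDB tables above and below are gated by Constantin records, so
  // (assuming the Constantin flow is enabled) the logging can be switched per
  // hart at runtime without re-elaborating the design.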
  val isWritePTWTable = WireInit(Constantin.createRecord("isWritePTWTable" + p(XSCoreParamsKey).HartId.toString))
  val PTWTable = ChiselDB.createTable("PTW_hart" + p(XSCoreParamsKey).HartId.toString, new PTWDB)
  val PTWReqDB, PTWRespDB, LLPTWReqDB, LLPTWRespDB = Wire(new PTWDB)
  PTWReqDB.vpn := ptw.io.req.bits.req_info.vpn
  PTWReqDB.source := ptw.io.req.bits.req_info.source
  PTWRespDB.vpn := ptw.io.refill.req_info.vpn
  PTWRespDB.source := ptw.io.refill.req_info.source
  LLPTWReqDB.vpn := llptw.io.in.bits.req_info.vpn
  LLPTWReqDB.source := llptw.io.in.bits.req_info.source
  LLPTWRespDB.vpn := llptw.io.mem.refill.vpn
  LLPTWRespDB.source := llptw.io.mem.refill.source
  PTWTable.log(PTWReqDB, isWritePTWTable.orR && ptw.io.req.fire, "PTWReq", clock, reset)
  PTWTable.log(PTWRespDB, isWritePTWTable.orR && ptw.io.mem.resp.fire, "PTWResp", clock, reset)
  PTWTable.log(LLPTWReqDB, isWritePTWTable.orR && llptw.io.in.fire, "LLPTWReq", clock, reset)
  PTWTable.log(LLPTWRespDB, isWritePTWTable.orR && llptw.io.mem.resp.fire, "LLPTWResp", clock, reset)

  val isWriteL2TlbMissQueueTable = WireInit(Constantin.createRecord("isWriteL2TlbMissQueueTable" + p(XSCoreParamsKey).HartId.toString))
  val L2TlbMissQueueTable = ChiselDB.createTable("L2TlbMissQueue_hart" + p(XSCoreParamsKey).HartId.toString, new L2TlbMissQueueDB)
  val L2TlbMissQueueInDB, L2TlbMissQueueOutDB = Wire(new L2TlbMissQueueDB)
  L2TlbMissQueueInDB.vpn := missQueue.io.in.bits.vpn
  L2TlbMissQueueOutDB.vpn := missQueue.io.out.bits.vpn
  L2TlbMissQueueTable.log(L2TlbMissQueueInDB, isWriteL2TlbMissQueueTable.orR && missQueue.io.in.fire, "L2TlbMissQueueIn", clock, reset)
  L2TlbMissQueueTable.log(L2TlbMissQueueOutDB, isWriteL2TlbMissQueueTable.orR && missQueue.io.out.fire, "L2TlbMissQueueOut", clock, reset)
}

/** BlockHelper: blocks the miss queue so that it does not send too many reqs to the cache
  * Parameters:
  *   enable: enable BlockHelper; when enabled, the mq must not send too many reqs
  *   start: when the miss queue's out fires and blocking is needed, block the miss queue's out
  *   block: block the miss queue's out
  *   latency: the cache access latency of the last miss queue out
  */
class BlockHelper(latency: Int)(implicit p: Parameters) extends XSModule {
  val io = IO(new Bundle {
    val enable = Input(Bool())
    val start = Input(Bool())
    val block = Output(Bool())
  })

  val count = RegInit(0.U(log2Ceil(latency).W))
  val valid = RegInit(false.B)
  val work = RegInit(true.B)

  io.block := valid

  when (io.start && work) { valid := true.B }
  when (valid) { count := count + 1.U }
  when (count === (latency.U) || io.enable) {
    valid := false.B
    work := io.enable
    count := 0.U
  }
}
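
// PTEHelper is a simulation-only blackbox (ExtModule), presumably backed by a
// DPI-C model that performs a software page-table walk; FakePTW below uses it
// when coreParams.softPTW is set.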
class PTEHelper() extends ExtModule {
  val clock = IO(Input(Clock()))
  val enable = IO(Input(Bool()))
  val satp = IO(Input(UInt(64.W)))
  val vpn = IO(Input(UInt(64.W)))
  val pte = IO(Output(UInt(64.W)))
  val level = IO(Output(UInt(8.W)))
  val pf = IO(Output(UInt(8.W)))
}

class PTWDelayN[T <: Data](gen: T, n: Int, flush: Bool) extends Module {
  val io = IO(new Bundle() {
    val in = Input(gen)
    val out = Output(gen)
    val ptwflush = Input(flush.cloneType)
  })
  val out = RegInit(VecInit(Seq.fill(n)(0.U.asTypeOf(gen))))
  val t = RegInit(VecInit(Seq.fill(n)(0.U.asTypeOf(gen))))
  out(0) := io.in
  if (n == 1) {
    io.out := out(0)
  } else {
    when (io.ptwflush) {
      for (i <- 0 until n) {
        t(i) := 0.U.asTypeOf(gen)
        out(i) := 0.U.asTypeOf(gen)
      }
      io.out := 0.U.asTypeOf(gen)
    } .otherwise {
      for (i <- 1 until n) {
        t(i-1) := out(i-1)
        out(i) := t(i-1)
      }
      io.out := out(n-1)
    }
  }
}

object PTWDelayN {
  def apply[T <: Data](in: T, n: Int, flush: Bool): T = {
    val delay = Module(new PTWDelayN(in.cloneType, n, flush))
    delay.io.in := in
    delay.io.ptwflush := flush
    delay.io.out
  }
}

class FakePTW()(implicit p: Parameters) extends XSModule with HasPtwConst {
  val io = IO(new L2TLBIO)
  val flush = VecInit(Seq.fill(PtwWidth)(false.B))
  flush(0) := DelayN(io.sfence.valid || io.csr.tlb.satp.changed, itlbParams.fenceDelay)
  flush(1) := DelayN(io.sfence.valid || io.csr.tlb.satp.changed, ldtlbParams.fenceDelay)
  for (i <- 0 until PtwWidth) {
    val helper = Module(new PTEHelper())
    helper.clock := clock
    helper.satp := io.csr.tlb.satp.ppn

    if (coreParams.softPTWDelay == 1) {
      helper.enable := io.tlb(i).req(0).fire
      helper.vpn := io.tlb(i).req(0).bits.vpn
    } else {
      helper.enable := PTWDelayN(io.tlb(i).req(0).fire, coreParams.softPTWDelay - 1, flush(i))
      helper.vpn := PTWDelayN(io.tlb(i).req(0).bits.vpn, coreParams.softPTWDelay - 1, flush(i))
    }

    val pte = helper.pte.asTypeOf(new PteBundle)
    val level = helper.level
    val pf = helper.pf
    val empty = RegInit(true.B)
    when (io.tlb(i).req(0).fire) {
      empty := false.B
    } .elsewhen (io.tlb(i).resp.fire || flush(i)) {
      empty := true.B
    }

    io.tlb(i).req(0).ready := empty || io.tlb(i).resp.fire
    io.tlb(i).resp.valid := PTWDelayN(io.tlb(i).req(0).fire, coreParams.softPTWDelay, flush(i))
    assert(!io.tlb(i).resp.valid || io.tlb(i).resp.ready)
    io.tlb(i).resp.bits.s1.entry.tag := PTWDelayN(io.tlb(i).req(0).bits.vpn, coreParams.softPTWDelay, flush(i))
    io.tlb(i).resp.bits.s1.entry.ppn := pte.ppn
    io.tlb(i).resp.bits.s1.entry.perm.map(_ := pte.getPerm())
    io.tlb(i).resp.bits.s1.entry.level.map(_ := level)
    io.tlb(i).resp.bits.s1.pf := pf
    io.tlb(i).resp.bits.s1.af := DontCare // TODO: implement it
    io.tlb(i).resp.bits.s1.entry.v := !pf
    io.tlb(i).resp.bits.s1.entry.prefetch := DontCare
    io.tlb(i).resp.bits.s1.entry.asid := io.csr.tlb.satp.asid
  }
}

class L2TLBWrapper()(implicit p: Parameters) extends LazyModule with HasXSParameter {
  override def shouldBeInlined: Boolean = false
  val useSoftPTW = coreParams.softPTW
  val node = if (!useSoftPTW) TLIdentityNode() else null
  val ptw = if (!useSoftPTW) LazyModule(new L2TLB()) else null
  if (!useSoftPTW) {
    node := ptw.node
  }

  class L2TLBWrapperImp(wrapper: LazyModule) extends LazyModuleImp(wrapper) with HasPerfEvents {
    val io = IO(new L2TLBIO)
    val perfEvents = if (useSoftPTW) {
      val fake_ptw = Module(new FakePTW())
      io <> fake_ptw.io
      Seq()
    } else {
      io <> ptw.module.io
      ptw.module.getPerfEvents
    }
    generatePerfEvent()
  }

  lazy val module = new L2TLBWrapperImp(this)
}