/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import utility._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}

/** The page table walk is divided into two parts:
 * One, PTW:   walks the PDEs (all non-leaf entries), one level at a time
 * Two, LLPTW: walks only the leaf PTEs (4KB entries), many requests in parallel
 */


/** PTW : page table walker
 * a finite state machine that only performs the 1GB and 2MB page walks,
 * in other words, every level except the last (leaf) one
 **/
class PTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val l3Hit = if (EnableSv48) Some(new Bool()) else None
    val l2Hit = Bool()
    val ppn = UInt(ptePPNLen.W)
    val stage1Hit = Bool()
    val stage1 = new PtwMergeResp
  }))
  val resp = DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val s2xlate = UInt(2.W)
    val resp = new PtwMergeResp
    val h_resp = new HptwResp
  })

  val llptw = DecoupledIO(new LLPTWInBundle())
  // NOTE: llptw changed from "connect to llptw" to "connect to page cache"
  // to avoid a corner case that caused duplicate entries

  val hptw = new Bundle {
    val req = DecoupledIO(new Bundle {
      val source = UInt(bSourceWidth.W)
      val id = UInt(log2Up(l2tlbParams.llptwsize).W)
      val gvpn = UInt(ptePPNLen.W)
    })
    val resp = Flipped(Valid(new Bundle {
      val h_resp = Output(new HptwResp)
    }))
  }
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }

  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level + 1).W)
  })
}

class PTW()(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new PTWIO)
  val sfence = io.sfence
  val mem = io.mem
  val req_s2xlate = Reg(UInt(2.W))
  val enableS2xlate = req_s2xlate =/= noS2xlate
  val onlyS1xlate = req_s2xlate === onlyStage1
  val onlyS2xlate = req_s2xlate === onlyStage2
  val satp = Wire(new TlbSatpBundle())
  when (io.req.fire) {
    satp := Mux(io.req.bits.req_info.s2xlate =/= noS2xlate, io.csr.vsatp, io.csr.satp)
  } .otherwise {
    satp := Mux(enableS2xlate, io.csr.vsatp, io.csr.satp)
  }
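  // Requests that involve stage-2 translation walk the VS-stage page table:
  // the walk is rooted at vsatp rather than satp, and stage-1 PBMT checks use
  // hPBMTE rather than mPBMTE (s1Pbmte below).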
  val s1Pbmte = Mux(req_s2xlate =/= noS2xlate, io.csr.hPBMTE, io.csr.mPBMTE)

  val mode = satp.mode
  val hgatp = io.csr.hgatp
  val flush = io.sfence.valid || io.csr.satp.changed || io.csr.vsatp.changed || io.csr.hgatp.changed
  val s2xlate = enableS2xlate && !onlyS1xlate
  val level = RegInit(3.U(log2Up(Level + 1).W))
  val af_level = RegInit(3.U(log2Up(Level + 1).W)) // access fault is reported at this level
  val gpf_level = RegInit(3.U(log2Up(Level + 1).W))
  val ppn = Reg(UInt(ptePPNLen.W))
  val vpn = Reg(UInt(vpnLen.W)) // vpn or gvpn (onlyS2xlate)
  val levelNext = level - 1.U
  val l3Hit = Reg(Bool())
  val l2Hit = Reg(Bool())
  val pte = mem.resp.bits.asTypeOf(new PteBundle())

  // s/w registers: s_* === false.B means a request still has to be sent,
  // w_* === false.B means a response is still outstanding
  val s_pmp_check = RegInit(true.B)
  val s_mem_req = RegInit(true.B)
  val s_llptw_req = RegInit(true.B)
  val w_mem_resp = RegInit(true.B)
  val s_hptw_req = RegInit(true.B)
  val w_hptw_resp = RegInit(true.B)
  val s_last_hptw_req = RegInit(true.B)
  val w_last_hptw_resp = RegInit(true.B)
  // for updating "level"
  val mem_addr_update = RegInit(false.B)

  val idle = RegInit(true.B)
  val finish = WireInit(false.B)
  val sent_to_pmp = idle === false.B && (s_pmp_check === false.B || mem_addr_update) && !finish

  val pageFault = pte.isPf(level, s1Pbmte)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, false.B, sent_to_pmp)

  val hptw_pageFault = RegInit(false.B)
  val hptw_accessFault = RegInit(false.B)
  val last_s2xlate = RegInit(false.B)
  val stage1Hit = RegEnable(io.req.bits.stage1Hit, io.req.fire)
  val stage1 = RegEnable(io.req.bits.stage1, io.req.fire)
  val hptw_resp_stage2 = Reg(Bool())

  // In two-stage address translation, the stage-1 ppn is still a guest-physical
  // page number from the host's point of view, so ppn_high need not be checked
  val ppn_af = Mux(enableS2xlate, Mux(onlyS1xlate, pte.isAf(), false.B), pte.isAf())
  val find_pte = pte.isLeaf() || ppn_af || pageFault
  val to_find_pte = level === 1.U && find_pte === false.B
  val source = RegEnable(io.req.bits.req_info.source, io.req.fire)

  val l3addr = Wire(UInt(PAddrBits.W))
  val l2addr = Wire(UInt(PAddrBits.W))
  val l1addr = Wire(UInt(PAddrBits.W))
  val mem_addr = Wire(UInt(PAddrBits.W))

  l3addr := MakeAddr(satp.ppn, getVpnn(vpn, 3))
  if (EnableSv48) {
    when (mode === Sv48) {
      l2addr := MakeAddr(Mux(l3Hit, ppn, pte.getPPN()), getVpnn(vpn, 2))
    } .otherwise {
      l2addr := MakeAddr(satp.ppn, getVpnn(vpn, 2))
    }
  } else {
    l2addr := MakeAddr(satp.ppn, getVpnn(vpn, 2))
  }
  l1addr := MakeAddr(Mux(l2Hit, ppn, pte.getPPN()), getVpnn(vpn, 1))
  mem_addr := Mux(af_level === 3.U, l3addr, Mux(af_level === 2.U, l2addr, l1addr))

  val hptw_resp = Reg(new HptwResp)
  // gpaddr: the guest-physical address HPTW translates next: the merged stage-1
  // PPN on a stage1Hit, the raw gvpn when only stage 2 is active, a
  // superpage-aligned splice of the PTE ppn and the vpn tail for the last
  // stage-2 request, and the current walk address otherwise
  val gpaddr = MuxCase(mem_addr, Seq(
    stage1Hit -> Cat(stage1.genPPN(), 0.U(offLen.W)),
    onlyS2xlate -> Cat(vpn, 0.U(offLen.W)),
    !s_last_hptw_req -> Cat(MuxLookup(level, pte.getPPN())(Seq(
      3.U -> Cat(pte.getPPN()(ptePPNLen - 1, vpnnLen * 3), vpn(vpnnLen * 3 - 1, 0)),
      2.U -> Cat(pte.getPPN()(ptePPNLen - 1, vpnnLen * 2), vpn(vpnnLen * 2 - 1, 0)),
      1.U -> Cat(pte.getPPN()(ptePPNLen - 1, vpnnLen), vpn(vpnnLen - 1, 0))
    )),
    0.U(offLen.W))
  ))
  val gvpn_gpf = Mux(s2xlate && io.csr.hgatp.mode === Sv39x4, gpaddr(gpaddr.getWidth - 1, GPAddrBitsSv39x4) =/= 0.U, Mux(s2xlate && io.csr.hgatp.mode === Sv48x4, gpaddr(gpaddr.getWidth - 1, GPAddrBitsSv48x4) =/= 0.U, false.B))
  val guestFault = hptw_pageFault || hptw_accessFault || gvpn_gpf
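  // hpaddr translates gpaddr using the most recent HPTW result. fake_h_resp and
  // fake_pte below are canned responses used to report a guest page fault that
  // occurs before any real stage-1 PTE has been fetched (pte_valid is false).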
  val hpaddr = Cat(hptw_resp.genPPNS2(get_pn(gpaddr)), get_off(gpaddr))
  val fake_h_resp = 0.U.asTypeOf(new HptwResp)
  fake_h_resp.entry.tag := get_pn(gpaddr)
  fake_h_resp.entry.vmid.map(_ := io.csr.hgatp.vmid)
  fake_h_resp.gpf := true.B

  val pte_valid = RegInit(false.B) // avoid an L1 TLB pf from stage 1 when a gpf happens in the first stage-2 translation in PTW
  val fake_pte = 0.U.asTypeOf(new PteBundle())
  fake_pte.perm.v := false.B // tell the L1 TLB that this is a fake pte
  fake_pte.perm.r := true.B
  fake_pte.perm.w := true.B
  fake_pte.perm.x := true.B
  fake_pte.perm.a := true.B
  fake_pte.perm.d := true.B
  fake_pte.ppn := ppn(ppnLen - 1, 0)
  fake_pte.ppn_high := ppn(ptePPNLen - 1, ppnLen)

  io.req.ready := idle
  val ptw_resp = Wire(new PtwMergeResp)
  ptw_resp.apply(Mux(pte_valid, pageFault && !accessFault, false.B), accessFault || (ppn_af && !(pte_valid && (pageFault || guestFault))), Mux(accessFault, af_level, Mux(guestFault, gpf_level, level)), Mux(pte_valid, pte, fake_pte), vpn, satp.asid, hgatp.vmid, vpn(sectortlbwidth - 1, 0), not_super = false, not_merge = false)

  val normal_resp = idle === false.B && mem_addr_update && !last_s2xlate && (guestFault || (w_mem_resp && find_pte) || (s_pmp_check && accessFault) || onlyS2xlate)
  val stageHit_resp = idle === false.B && hptw_resp_stage2
  io.resp.valid := Mux(stage1Hit, stageHit_resp, normal_resp)
  io.resp.bits.source := source
  io.resp.bits.resp := Mux(stage1Hit || (l3Hit || l2Hit) && guestFault && !pte_valid, stage1, ptw_resp)
  io.resp.bits.h_resp := Mux(gvpn_gpf, fake_h_resp, hptw_resp)
  io.resp.bits.s2xlate := req_s2xlate

  io.llptw.valid := s_llptw_req === false.B && to_find_pte && !accessFault && !guestFault
  io.llptw.bits.req_info.source := source
  io.llptw.bits.req_info.vpn := vpn
  io.llptw.bits.req_info.s2xlate := req_s2xlate
  io.llptw.bits.ppn := DontCare

  io.pmp.req.valid := DontCare // same cycle, do not use valid
  io.pmp.req.bits.addr := Mux(s2xlate, hpaddr, mem_addr)
  io.pmp.req.bits.size := 3.U // TODO: fix it
  io.pmp.req.bits.cmd := TlbCmd.read

  mem.req.valid := s_mem_req === false.B && !mem.mask && !accessFault && s_pmp_check
  mem.req.bits.addr := Mux(s2xlate, hpaddr, mem_addr)
  mem.req.bits.id := FsmReqID.U(bMemID.W)
  mem.req.bits.hptw_bypassed := false.B

  io.refill.req_info.s2xlate := req_s2xlate
  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source

  io.hptw.req.valid := !s_hptw_req || !s_last_hptw_req
  io.hptw.req.bits.id := FsmReqID.U(bMemID.W)
  io.hptw.req.bits.gvpn := get_pn(gpaddr)
  io.hptw.req.bits.source := source

  when (io.req.fire && io.req.bits.stage1Hit) {
    idle := false.B
    req_s2xlate := io.req.bits.req_info.s2xlate
    s_last_hptw_req := false.B
    hptw_resp_stage2 := false.B
    last_s2xlate := false.B
    hptw_pageFault := false.B
    hptw_accessFault := false.B
  }

  when (io.resp.fire && stage1Hit) {
    idle := true.B
  }
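  // A new (non-stage1Hit) request: initialize level/af_level/gpf_level and the
  // walk root according to which upper levels already hit in the page cache,
  // then start the first step of the flow selected by s2xlate.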
  when (io.req.fire && !io.req.bits.stage1Hit) {
    val req = io.req.bits
    if (EnableSv48) {
      when (mode === Sv48) {
        level := Mux(req.l2Hit, 1.U, Mux(req.l3Hit.get, 2.U, 3.U))
        af_level := Mux(req.l2Hit, 1.U, Mux(req.l3Hit.get, 2.U, 3.U))
        gpf_level := Mux(req.l2Hit, 2.U, Mux(req.l3Hit.get, 3.U, 0.U))
        ppn := Mux(req.l2Hit || req.l3Hit.get, io.req.bits.ppn, satp.ppn)
        l3Hit := req.l3Hit.get
      } .otherwise {
        level := Mux(req.l2Hit, 1.U, 2.U)
        af_level := Mux(req.l2Hit, 1.U, 2.U)
        gpf_level := 0.U
        ppn := Mux(req.l2Hit, io.req.bits.ppn, satp.ppn)
        l3Hit := false.B
      }
    } else {
      level := Mux(req.l2Hit, 1.U, 2.U)
      af_level := Mux(req.l2Hit, 1.U, 2.U)
      gpf_level := 0.U
      ppn := Mux(req.l2Hit, io.req.bits.ppn, satp.ppn)
      l3Hit := false.B
    }
    vpn := io.req.bits.req_info.vpn
    l2Hit := req.l2Hit
    accessFault := false.B
    idle := false.B
    hptw_pageFault := false.B
    hptw_accessFault := false.B
    pte_valid := false.B
    req_s2xlate := io.req.bits.req_info.s2xlate
    when (io.req.bits.req_info.s2xlate === onlyStage2) {
      val onlys2_gpaddr = Cat(io.req.bits.req_info.vpn, 0.U(offLen.W)) // gpaddr is 50 bits, no need to check the high bits when Sv48x4 is enabled
      val check_gpa_high_fail = Mux(io.req.bits.req_info.s2xlate === onlyStage2 && io.csr.hgatp.mode === Sv39x4, onlys2_gpaddr(onlys2_gpaddr.getWidth - 1, GPAddrBitsSv39x4) =/= 0.U, false.B)
      last_s2xlate := false.B
      when (check_gpa_high_fail) {
        mem_addr_update := true.B
      } .otherwise {
        s_last_hptw_req := false.B
      }
    } .elsewhen (io.req.bits.req_info.s2xlate === allStage) {
      last_s2xlate := true.B
      s_hptw_req := false.B
    } .otherwise {
      last_s2xlate := false.B
      s_pmp_check := false.B
    }
  }

  when (io.hptw.req.fire && s_hptw_req === false.B) {
    s_hptw_req := true.B
    w_hptw_resp := false.B
  }

  when (io.hptw.resp.fire && w_hptw_resp === false.B) {
    w_hptw_resp := true.B
    val g_perm_fail = !io.hptw.resp.bits.h_resp.entry.perm.get.r && !(io.csr.priv.mxr && io.hptw.resp.bits.h_resp.entry.perm.get.x)
    hptw_pageFault := io.hptw.resp.bits.h_resp.gpf || g_perm_fail
    hptw_accessFault := io.hptw.resp.bits.h_resp.gaf
    hptw_resp := io.hptw.resp.bits.h_resp
    hptw_resp.gpf := io.hptw.resp.bits.h_resp.gpf || g_perm_fail
    when (!(g_perm_fail || io.hptw.resp.bits.h_resp.gpf || io.hptw.resp.bits.h_resp.gaf)) {
      s_pmp_check := false.B
    }
  }

  when (io.hptw.req.fire && s_last_hptw_req === false.B) {
    w_last_hptw_resp := false.B
    s_last_hptw_req := true.B
  }

  when (io.hptw.resp.fire && w_last_hptw_resp === false.B && stage1Hit) {
    w_last_hptw_resp := true.B
    hptw_resp_stage2 := true.B
    hptw_resp := io.hptw.resp.bits.h_resp
  }

  when (io.hptw.resp.fire && w_last_hptw_resp === false.B && !stage1Hit) {
    hptw_pageFault := io.hptw.resp.bits.h_resp.gpf
    hptw_accessFault := io.hptw.resp.bits.h_resp.gaf
    hptw_resp := io.hptw.resp.bits.h_resp
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
    last_s2xlate := false.B
  }

  when (sent_to_pmp && mem_addr_update === false.B) {
    s_mem_req := false.B
    s_pmp_check := true.B
  }

  when (accessFault && idle === false.B) {
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    s_llptw_req := true.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
    last_s2xlate := false.B
  }

  when (guestFault && idle === false.B) {
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    s_llptw_req := true.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
    last_s2xlate := false.B
  }

  when (mem.req.fire) {
    s_mem_req := true.B
    w_mem_resp := false.B
  }
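  // All s_*/w_* flags in this file follow one handshake idiom; a minimal
  // sketch of the pattern (illustration only, not part of the design):
  //   val s_req  = RegInit(true.B)  // false => a request still has to be sent
  //   val w_resp = RegInit(true.B)  // false => a response is still outstanding
  //   out.valid := !s_req
  //   when (out.fire) { s_req := true.B; w_resp := false.B }
  //   when (in.fire && !w_resp) { w_resp := true.B }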
  when (mem.resp.fire && w_mem_resp === false.B) {
    w_mem_resp := true.B
    af_level := af_level - 1.U
    s_llptw_req := false.B
    mem_addr_update := true.B
    gpf_level := Mux(mode === Sv39 && !pte_valid && !(l3Hit || l2Hit), gpf_level - 2.U, gpf_level - 1.U)
    pte_valid := true.B
  }

  when (mem_addr_update) {
    when (level >= 2.U && !onlyS2xlate && !(guestFault || find_pte || accessFault)) {
      level := levelNext
      when (s2xlate) {
        s_hptw_req := false.B
      } .otherwise {
        s_mem_req := false.B
      }
      s_llptw_req := true.B
      mem_addr_update := false.B
    } .elsewhen (io.llptw.valid) {
      when (io.llptw.fire) {
        idle := true.B
        s_llptw_req := true.B
        mem_addr_update := false.B
        last_s2xlate := false.B
      }
      finish := true.B
    } .elsewhen (s2xlate && last_s2xlate === true.B) {
      when (accessFault || pageFault || ppn_af) {
        last_s2xlate := false.B
      } .otherwise {
        s_last_hptw_req := false.B
        mem_addr_update := false.B
      }
    } .elsewhen (io.resp.valid) {
      when (io.resp.fire) {
        idle := true.B
        s_llptw_req := true.B
        mem_addr_update := false.B
        accessFault := false.B
      }
      finish := true.B
    }
  }

  when (flush) {
    idle := true.B
    s_pmp_check := true.B
    s_mem_req := true.B
    s_llptw_req := true.B
    w_mem_resp := true.B
    accessFault := false.B
    mem_addr_update := false.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
  }

  XSDebug(p"[ptw] level:${level} notFound:${pageFault}\n")

  // perf
  XSPerfAccumulate("fsm_count", io.req.fire)
  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"fsm_count_source${i}", io.req.fire && io.req.bits.req_info.source === i.U)
  }
  XSPerfAccumulate("fsm_busy", !idle)
  XSPerfAccumulate("fsm_idle", idle)
  XSPerfAccumulate("resp_blocked", io.resp.valid && !io.resp.ready)
  XSPerfAccumulate("ptw_ppn_af", io.resp.fire && ppn_af)
  XSPerfAccumulate("mem_count", mem.req.fire)
  XSPerfAccumulate("mem_cycle", BoolStopWatch(mem.req.fire, mem.resp.fire, true))
  XSPerfAccumulate("mem_blocked", mem.req.valid && !mem.req.ready)

  TimeOutAssert(!idle, timeOutThreshold, "page table walker time out")

  val perfEvents = Seq(
    ("fsm_count         ", io.req.fire                                     ),
    ("fsm_busy          ", !idle                                           ),
    ("fsm_idle          ", idle                                            ),
    ("resp_blocked      ", io.resp.valid && !io.resp.ready                 ),
    ("mem_count         ", mem.req.fire                                    ),
    ("mem_cycle         ", BoolStopWatch(mem.req.fire, mem.resp.fire, true)),
    ("mem_blocked       ", mem.req.valid && !mem.req.ready                 ),
  )
  generatePerfEvent()
}

/*========================= LLPTW ==============================*/

/** LLPTW : Last Level Page Table Walker
 * the page walker that only handles the last-level (4KB) page walk
 **/
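// Each LLPTW entry is an independent little state machine; up to
// l2tlbParams.llptwsize walks are in flight at once, and walks to duplicate
// vpns are merged so that each unique leaf PTE is fetched from memory only once.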
class LLPTWInBundle(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = Output(new L2TlbInnerBundle())
  val ppn = Output(UInt(ptePPNLen.W))
}

class LLPTWIO(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val in = Flipped(DecoupledIO(new LLPTWInBundle()))
  val out = DecoupledIO(new Bundle {
    val req_info = Output(new L2TlbInnerBundle())
    val id = Output(UInt(bMemID.W))
    val h_resp = Output(new HptwResp)
    val first_s2xlate_fault = Output(Bool()) // whether a pf/af occurred during the first stage-2 translation
    val af = Output(Bool())
  })
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
      val value = Output(UInt(blockBits.W))
    }))
    val enq_ptr = Output(UInt(log2Ceil(l2tlbParams.llptwsize).W))
    val buffer_it = Output(Vec(l2tlbParams.llptwsize, Bool()))
    val refill = Output(new L2TlbInnerBundle())
    val req_mask = Input(Vec(l2tlbParams.llptwsize, Bool()))
    val flush_latch = Input(Vec(l2tlbParams.llptwsize, Bool()))
  }
  val cache = DecoupledIO(new L2TlbInnerBundle())
  val pmp = new Bundle {
    val req = Valid(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
  val hptw = new Bundle {
    val req = DecoupledIO(new Bundle {
      val source = UInt(bSourceWidth.W)
      val id = UInt(log2Up(l2tlbParams.llptwsize).W)
      val gvpn = UInt(ptePPNLen.W)
    })
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
      val h_resp = Output(new HptwResp)
    }))
  }
}

class LLPTWEntry(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = new L2TlbInnerBundle()
  val ppn = UInt(ptePPNLen.W)
  val wait_id = UInt(log2Up(l2tlbParams.llptwsize).W)
  val af = Bool()
  val hptw_resp = new HptwResp()
  val first_s2xlate_fault = Output(Bool())
}


class LLPTW(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new LLPTWIO())
  val enableS2xlate = io.in.bits.req_info.s2xlate =/= noS2xlate
  val satp = Mux(enableS2xlate, io.csr.vsatp, io.csr.satp)
  val s1Pbmte = Mux(enableS2xlate, io.csr.hPBMTE, io.csr.mPBMTE)

  val flush = io.sfence.valid || io.csr.satp.changed || io.csr.vsatp.changed || io.csr.hgatp.changed
  val entries = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(0.U.asTypeOf(new LLPTWEntry()))))
  val state_idle :: state_hptw_req :: state_hptw_resp :: state_addr_check :: state_mem_req :: state_mem_waiting :: state_mem_out :: state_last_hptw_req :: state_last_hptw_resp :: state_cache :: Nil = Enum(10)
  val state = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(state_idle)))

  val is_emptys = state.map(_ === state_idle)
  val is_mems = state.map(_ === state_mem_req)
  val is_waiting = state.map(_ === state_mem_waiting)
  val is_having = state.map(_ === state_mem_out)
  val is_cache = state.map(_ === state_cache)
  val is_hptw_req = state.map(_ === state_hptw_req)
  val is_last_hptw_req = state.map(_ === state_last_hptw_req)
  val is_hptw_resp = state.map(_ === state_hptw_resp)
  val is_last_hptw_resp = state.map(_ === state_last_hptw_resp)

  val full = !ParallelOR(is_emptys).asBool
  val enq_ptr = ParallelPriorityEncoder(is_emptys)
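  // Entry lifecycle without stage 2: idle -> addr_check -> mem_req ->
  // mem_waiting -> mem_out -> idle, with cache as a side exit for duplicates.
  // With s2xlate === allStage, hptw_req/hptw_resp precede the memory access
  // and last_hptw_req/last_hptw_resp translate the fetched leaf PPN.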
  val mem_ptr = ParallelPriorityEncoder(is_having) // TODO: optimize timing, bad: entries -> ptr -> entry
  val mem_arb = Module(new RRArbiterInit(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    mem_arb.io.in(i).bits := entries(i)
    mem_arb.io.in(i).valid := is_mems(i) && !io.mem.req_mask(i)
  }

  // process hptw requests serially
  val hyper_arb1 = Module(new RRArbiterInit(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    hyper_arb1.io.in(i).bits := entries(i)
    hyper_arb1.io.in(i).valid := is_hptw_req(i) && !(Cat(is_hptw_resp).orR) && !(Cat(is_last_hptw_resp).orR)
  }
  val hyper_arb2 = Module(new RRArbiterInit(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    hyper_arb2.io.in(i).bits := entries(i)
    hyper_arb2.io.in(i).valid := is_last_hptw_req(i) && !(Cat(is_hptw_resp).orR) && !(Cat(is_last_hptw_resp).orR)
  }

  val cache_ptr = ParallelMux(is_cache, (0 until l2tlbParams.llptwsize).map(_.U(log2Up(l2tlbParams.llptwsize).W)))

  // duplicate req handling
  // to_wait: an earlier entry with the same vpn is already accessing mem, so enter state_mem_waiting
  // to_cache: the earlier entry's data has just come back, so enter state_cache
  val dup_vec = state.indices.map(i =>
    dup(io.in.bits.req_info.vpn, entries(i).req_info.vpn) && io.in.bits.req_info.s2xlate === entries(i).req_info.s2xlate
  )
  val dup_req_fire = mem_arb.io.out.fire && dup(io.in.bits.req_info.vpn, mem_arb.io.out.bits.req_info.vpn) && io.in.bits.req_info.s2xlate === mem_arb.io.out.bits.req_info.s2xlate // dup with the entry whose req is firing
  val dup_vec_wait = dup_vec.zip(is_waiting).map{case (d, w) => d && w} // dup with "mem_waiting" entries that already sent a mem req
  val dup_vec_having = dup_vec.zipWithIndex.map{case (d, i) => d && is_having(i)} // dup with a "mem_out" entry that just received its data
  val dup_vec_last_hptw = dup_vec.zipWithIndex.map{case (d, i) => d && (is_last_hptw_req(i) || is_last_hptw_resp(i))}
  val wait_id = Mux(dup_req_fire, mem_arb.io.chosen, ParallelMux(dup_vec_wait zip entries.map(_.wait_id)))
  val dup_wait_resp = io.mem.resp.fire && VecInit(dup_vec_wait)(io.mem.resp.bits.id) && !io.mem.flush_latch(io.mem.resp.bits.id) // dup with the entry whose data arrives next cycle
  val to_wait = Cat(dup_vec_wait).orR || dup_req_fire
  val to_mem_out = dup_wait_resp && ((entries(io.mem.resp.bits.id).req_info.s2xlate === noS2xlate) || (entries(io.mem.resp.bits.id).req_info.s2xlate === onlyStage1))
  val to_cache = Cat(dup_vec_having).orR || Cat(dup_vec_last_hptw).orR
  val to_hptw_req = io.in.bits.req_info.s2xlate === allStage
  val to_last_hptw_req = dup_wait_resp && entries(io.mem.resp.bits.id).req_info.s2xlate === allStage
  val last_hptw_req_id = io.mem.resp.bits.id
  val req_paddr = MakeAddr(io.in.bits.ppn(ppnLen-1, 0), getVpnn(io.in.bits.req_info.vpn, 0))
  val req_hpaddr = MakeAddr(entries(last_hptw_req_id).hptw_resp.genPPNS2(get_pn(req_paddr)), getVpnn(io.in.bits.req_info.vpn, 0))
  val index = Mux(entries(last_hptw_req_id).req_info.s2xlate === allStage, req_hpaddr, req_paddr)(log2Up(l2tlbParams.blockBytes)-1, log2Up(XLEN/8))
  val last_hptw_req_ppn = io.mem.resp.bits.value.asTypeOf(Vec(blockBits / XLEN, new PteBundle()))(index).getPPN()
  XSError(RegNext(dup_req_fire && Cat(dup_vec_wait).orR, init = false.B), "mem req but some entries already waiting, should not happen")

  XSError(io.in.fire && ((to_mem_out && to_cache) || (to_wait && to_cache)), "llptw enq, to cache conflict with to mem")
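  // Enqueue-time dedup: the incoming request is compared against every live
  // entry with the same vpn and s2xlate. Depending on how far the matching
  // entry has progressed, the newcomer enters mem_waiting, mem_out,
  // last_hptw_req or cache directly instead of issuing its own memory request.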
  val mem_resp_hit = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(false.B)))
  val enq_state_normal = MuxCase(state_addr_check, Seq(
    to_mem_out -> state_mem_out, // same as the wait case below, but the mem resp is arriving right now
    to_last_hptw_req -> state_last_hptw_req,
    to_wait -> state_mem_waiting,
    to_cache -> state_cache,
    to_hptw_req -> state_hptw_req
  ))
  val enq_state = Mux(from_pre(io.in.bits.req_info.source) && enq_state_normal =/= state_addr_check, state_idle, enq_state_normal)
  when (io.in.fire) {
    // if a prefetch req does not need mem access, just drop it;
    // there will then be at most 1 + FilterSize entries that need to re-access the page cache,
    // so 2 + FilterSize is enough to avoid deadlock
    state(enq_ptr) := enq_state
    entries(enq_ptr).req_info := io.in.bits.req_info
    entries(enq_ptr).ppn := Mux(to_last_hptw_req, last_hptw_req_ppn, io.in.bits.ppn)
    entries(enq_ptr).wait_id := Mux(to_wait, wait_id, enq_ptr)
    entries(enq_ptr).af := false.B
    entries(enq_ptr).hptw_resp := Mux(to_last_hptw_req, entries(last_hptw_req_id).hptw_resp, Mux(to_wait, entries(wait_id).hptw_resp, entries(enq_ptr).hptw_resp))
    entries(enq_ptr).first_s2xlate_fault := false.B
    mem_resp_hit(enq_ptr) := to_mem_out || to_last_hptw_req
  }

  val enq_ptr_reg = RegNext(enq_ptr)
  val need_addr_check = GatedValidRegNext(enq_state === state_addr_check && io.in.fire && !flush)

  val hasHptwResp = ParallelOR(state.map(_ === state_hptw_resp)).asBool
  val hptw_resp_ptr_reg = RegNext(io.hptw.resp.bits.id)
  val hptw_need_addr_check = RegNext(hasHptwResp && io.hptw.resp.fire && !flush) && state(hptw_resp_ptr_reg) === state_addr_check

  val ptes = io.mem.resp.bits.value.asTypeOf(Vec(blockBits / XLEN, new PteBundle()))
  val gpaddr = MakeGPAddr(entries(hptw_resp_ptr_reg).ppn, getVpnn(entries(hptw_resp_ptr_reg).req_info.vpn, 0))
  val hptw_resp = entries(hptw_resp_ptr_reg).hptw_resp
  val hpaddr = Cat(hptw_resp.genPPNS2(get_pn(gpaddr)), get_off(gpaddr))
  val addr = RegEnable(MakeAddr(io.in.bits.ppn(ppnLen - 1, 0), getVpnn(io.in.bits.req_info.vpn, 0)), io.in.fire)
  io.pmp.req.valid := need_addr_check || hptw_need_addr_check
  io.pmp.req.bits.addr := Mux(hptw_need_addr_check, hpaddr, addr)
  io.pmp.req.bits.cmd := TlbCmd.read
  io.pmp.req.bits.size := 3.U // TODO: fix it
  val pmp_resp_valid = io.pmp.req.valid // same cycle
  when (pmp_resp_valid) {
    // NOTE: if the pmp resp arrives but the state is no longer addr_check, the entry duplicates
    // another entry and its state was already changed. If it duplicates the entry whose req is
    // firing, it was set to mem_waiting above, and ld must be false, so don't care.
    val ptr = Mux(hptw_need_addr_check, hptw_resp_ptr_reg, enq_ptr_reg)
    val accessFault = io.pmp.resp.ld || io.pmp.resp.mmio
    entries(ptr).af := accessFault
    state(ptr) := Mux(accessFault, state_mem_out, state_mem_req)
  }

  when (mem_arb.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) =/= state_idle && state(i) =/= state_mem_out && state(i) =/= state_last_hptw_req && state(i) =/= state_last_hptw_resp
        && entries(i).req_info.s2xlate === mem_arb.io.out.bits.req_info.s2xlate
        && dup(entries(i).req_info.vpn, mem_arb.io.out.bits.req_info.vpn)) {
        // NOTE: "a dup enq sets its own state to mem_waiting" -> "sending the req sets the other dup entries to mem_waiting"
        state(i) := state_mem_waiting
        entries(i).hptw_resp := entries(mem_arb.io.chosen).hptw_resp
        entries(i).wait_id := mem_arb.io.chosen
      }
    }
  }
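  // One memory response may wake several entries: every entry in
  // state_mem_waiting whose wait_id matches the response id consumes it here.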
  when (io.mem.resp.fire) {
    state.indices.map{i =>
      when (state(i) === state_mem_waiting && io.mem.resp.bits.id === entries(i).wait_id) {
        val req_paddr = MakeAddr(entries(i).ppn, getVpnn(entries(i).req_info.vpn, 0))
        val req_hpaddr = MakeAddr(entries(i).hptw_resp.genPPNS2(get_pn(req_paddr)), getVpnn(entries(i).req_info.vpn, 0))
        val index = Mux(entries(i).req_info.s2xlate === allStage, req_hpaddr, req_paddr)(log2Up(l2tlbParams.blockBytes)-1, log2Up(XLEN/8))
        state(i) := Mux(entries(i).req_info.s2xlate === allStage && !(ptes(index).isPf(0.U, s1Pbmte) || !ptes(index).isLeaf() || ptes(index).isAf() || ptes(index).isStage1Gpf(io.csr.vsatp.mode)),
          state_last_hptw_req, state_mem_out)
        mem_resp_hit(i) := true.B
        entries(i).ppn := ptes(index).getPPN() // for the last stage-2 translation
        entries(i).hptw_resp.gpf := Mux(entries(i).req_info.s2xlate === allStage, ptes(index).isStage1Gpf(io.csr.vsatp.mode), false.B)
      }
    }
  }

  when (hyper_arb1.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) === state_hptw_req && entries(i).ppn === hyper_arb1.io.out.bits.ppn && entries(i).req_info.s2xlate === allStage && hyper_arb1.io.chosen === i.U) {
        state(i) := state_hptw_resp
        entries(i).wait_id := hyper_arb1.io.chosen
      }
    }
  }

  when (hyper_arb2.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) === state_last_hptw_req && entries(i).ppn === hyper_arb2.io.out.bits.ppn && entries(i).req_info.s2xlate === allStage && hyper_arb2.io.chosen === i.U) {
        state(i) := state_last_hptw_resp
        entries(i).wait_id := hyper_arb2.io.chosen
      }
    }
  }

  when (io.hptw.resp.fire) {
    for (i <- state.indices) {
      when (state(i) === state_hptw_resp && io.hptw.resp.bits.id === entries(i).wait_id && io.hptw.resp.bits.h_resp.entry.tag === entries(i).ppn) {
        val check_g_perm_fail = !io.hptw.resp.bits.h_resp.entry.perm.get.r && !(io.csr.priv.mxr && io.hptw.resp.bits.h_resp.entry.perm.get.x)
        when (check_g_perm_fail || io.hptw.resp.bits.h_resp.gaf || io.hptw.resp.bits.h_resp.gpf) {
          state(i) := state_mem_out
          entries(i).hptw_resp := io.hptw.resp.bits.h_resp
          entries(i).hptw_resp.gpf := io.hptw.resp.bits.h_resp.gpf || check_g_perm_fail
          entries(i).first_s2xlate_fault := io.hptw.resp.bits.h_resp.gaf || io.hptw.resp.bits.h_resp.gpf
        } .otherwise { // route this entry, which was waiting for the hptw resp, onward
          val need_to_waiting_vec = state.indices.map(i => state(i) === state_mem_waiting && dup(entries(i).req_info.vpn, entries(io.hptw.resp.bits.id).req_info.vpn))
          val waiting_index = ParallelMux(need_to_waiting_vec zip entries.map(_.wait_id))
          state(i) := Mux(Cat(need_to_waiting_vec).orR, state_mem_waiting, state_addr_check)
          entries(i).hptw_resp := io.hptw.resp.bits.h_resp
          entries(i).wait_id := Mux(Cat(need_to_waiting_vec).orR, waiting_index, entries(i).wait_id)
          // TODO: also update entries that issued the same hptw req
        }
      }
      when (state(i) === state_last_hptw_resp && io.hptw.resp.bits.id === entries(i).wait_id && io.hptw.resp.bits.h_resp.entry.tag === entries(i).ppn) {
        state(i) := state_mem_out
        entries(i).hptw_resp := io.hptw.resp.bits.h_resp
        // TODO: also update entries that issued the same hptw req
      }
    }
  }
  when (io.out.fire) {
    assert(state(mem_ptr) === state_mem_out)
    state(mem_ptr) := state_idle
  }
  mem_resp_hit.map(a => when (a) { a := false.B })
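  // mem_resp_hit is a one-cycle pulse telling the page cache which entries
  // should buffer the refill data; each bit clears the cycle after it is set.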
  when (io.cache.fire) {
    state(cache_ptr) := state_idle
  }
  XSError(io.out.fire && io.cache.fire && (mem_ptr === cache_ptr), "mem resp and cache fire at the same time at the same entry")

  when (flush) {
    state.map(_ := state_idle)
  }

  io.in.ready := !full

  io.out.valid := ParallelOR(is_having).asBool
  io.out.bits.req_info := entries(mem_ptr).req_info
  io.out.bits.id := mem_ptr
  io.out.bits.af := entries(mem_ptr).af
  io.out.bits.h_resp := entries(mem_ptr).hptw_resp
  io.out.bits.first_s2xlate_fault := entries(mem_ptr).first_s2xlate_fault

  val hptw_req_arb = Module(new Arbiter(new Bundle {
    val source = UInt(bSourceWidth.W)
    val id = UInt(log2Up(l2tlbParams.llptwsize).W)
    val ppn = UInt(ptePPNLen.W)
  }, 2))
  // first stage-2 translation
  hptw_req_arb.io.in(0).valid := hyper_arb1.io.out.valid
  hptw_req_arb.io.in(0).bits.source := hyper_arb1.io.out.bits.req_info.source
  hptw_req_arb.io.in(0).bits.ppn := hyper_arb1.io.out.bits.ppn
  hptw_req_arb.io.in(0).bits.id := hyper_arb1.io.chosen
  hyper_arb1.io.out.ready := hptw_req_arb.io.in(0).ready
  // last stage-2 translation
  hptw_req_arb.io.in(1).valid := hyper_arb2.io.out.valid
  hptw_req_arb.io.in(1).bits.source := hyper_arb2.io.out.bits.req_info.source
  hptw_req_arb.io.in(1).bits.ppn := hyper_arb2.io.out.bits.ppn
  hptw_req_arb.io.in(1).bits.id := hyper_arb2.io.chosen
  hyper_arb2.io.out.ready := hptw_req_arb.io.in(1).ready
  hptw_req_arb.io.out.ready := io.hptw.req.ready
  io.hptw.req.valid := hptw_req_arb.io.out.fire && !flush
  io.hptw.req.bits.gvpn := hptw_req_arb.io.out.bits.ppn
  io.hptw.req.bits.id := hptw_req_arb.io.out.bits.id
  io.hptw.req.bits.source := hptw_req_arb.io.out.bits.source

  io.mem.req.valid := mem_arb.io.out.valid && !flush
  val mem_paddr = MakeAddr(mem_arb.io.out.bits.ppn, getVpnn(mem_arb.io.out.bits.req_info.vpn, 0))
  val mem_hpaddr = MakeAddr(mem_arb.io.out.bits.hptw_resp.genPPNS2(get_pn(mem_paddr)), getVpnn(mem_arb.io.out.bits.req_info.vpn, 0))
  io.mem.req.bits.addr := Mux(mem_arb.io.out.bits.req_info.s2xlate === allStage, mem_hpaddr, mem_paddr)
  io.mem.req.bits.id := mem_arb.io.chosen
  io.mem.req.bits.hptw_bypassed := false.B
  mem_arb.io.out.ready := io.mem.req.ready
  val mem_refill_id = RegNext(io.mem.resp.bits.id(log2Up(l2tlbParams.llptwsize)-1, 0))
  io.mem.refill := entries(mem_refill_id).req_info
  io.mem.refill.s2xlate := entries(mem_refill_id).req_info.s2xlate
  io.mem.buffer_it := mem_resp_hit
  io.mem.enq_ptr := enq_ptr

  io.cache.valid := Cat(is_cache).orR
  io.cache.bits := ParallelMux(is_cache, entries.map(_.req_info))

  XSPerfAccumulate("llptw_in_count", io.in.fire)
  XSPerfAccumulate("llptw_in_block", io.in.valid && !io.in.ready)
  for (i <- 0 until 7) {
    XSPerfAccumulate(s"enq_state${i}", io.in.fire && enq_state === i.U)
  }
  for (i <- 0 until (l2tlbParams.llptwsize + 1)) {
    XSPerfAccumulate(s"util${i}", PopCount(is_emptys.map(!_)) === i.U)
    XSPerfAccumulate(s"mem_util${i}", PopCount(is_mems) === i.U)
    XSPerfAccumulate(s"waiting_util${i}", PopCount(is_waiting) === i.U)
  }
  XSPerfAccumulate("mem_count", io.mem.req.fire)
  XSPerfAccumulate("mem_cycle", PopCount(is_waiting) =/= 0.U)
  XSPerfAccumulate("blocked_in", io.in.valid && !io.in.ready)

  for (i <- 0 until l2tlbParams.llptwsize) {
    TimeOutAssert(state(i) =/= state_idle, timeOutThreshold, s"missqueue time out no out ${i}")
  }

  val perfEvents = Seq(
    ("tlbllptw_incount  ", io.in.fire                 ),
    ("tlbllptw_inblock  ", io.in.valid && !io.in.ready),
    ("tlbllptw_memcount ", io.mem.req.fire            ),
    ("tlbllptw_memcycle ", PopCount(is_waiting)       ),
  )
  generatePerfEvent()
}

/*========================= HPTW ==============================*/

/** HPTW : Hypervisor Page Table Walker
 * the page walker that performs the virtual machine's G-stage page walk,
 * i.e. guest-physical address -> host-physical address translation
 **/
class HPTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val id = UInt(log2Up(l2tlbParams.llptwsize).W)
    val gvpn = UInt(gvpnLen.W)
    val ppn = UInt(ppnLen.W)
    val l3Hit = if (EnableSv48) Some(new Bool()) else None
    val l2Hit = Bool()
    val l1Hit = Bool()
    val bypassed = Bool() // if bypassed, don't refill
  }))
  val resp = DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val resp = Output(new HptwResp())
    val id = Output(UInt(bMemID.W))
  })

  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level + 1).W)
  })
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
}

class HPTW()(implicit p: Parameters) extends XSModule with HasPtwConst {
  val io = IO(new HPTWIO)
  val hgatp = io.csr.hgatp
  val mpbmte = io.csr.mPBMTE
  val sfence = io.sfence
  val flush = sfence.valid || hgatp.changed || io.csr.satp.changed || io.csr.vsatp.changed
  val mode = hgatp.mode

  val level = RegInit(3.U(log2Up(Level + 1).W))
  val af_level = RegInit(3.U(log2Up(Level + 1).W)) // access fault is reported at this level
  val gpaddr = Reg(UInt(GPAddrBits.W))
  val req_ppn = Reg(UInt(ppnLen.W))
  val vpn = gpaddr(GPAddrBits-1, offLen)
  val levelNext = level - 1.U
  val l3Hit = Reg(Bool())
  val l2Hit = Reg(Bool())
  val l1Hit = Reg(Bool())
  val bypassed = Reg(Bool())
//  val pte = io.mem.resp.bits.MergeRespToPte()
  val pte = io.mem.resp.bits.asTypeOf(new PteBundle().cloneType)
  val ppn_l3 = Mux(l3Hit, req_ppn, pte.ppn)
  val ppn_l2 = Mux(l2Hit, req_ppn, pte.ppn)
  val ppn_l1 = Mux(l1Hit, req_ppn, pte.ppn)
  val ppn = Wire(UInt(PAddrBits.W))
  val p_pte = MakeAddr(ppn, getVpnn(vpn, level))
  val pg_base = Wire(UInt(PAddrBits.W))
  val mem_addr = Wire(UInt(PAddrBits.W))
  if (EnableSv48) {
    when (mode === Sv48) {
      ppn := Mux(af_level === 2.U, ppn_l3, Mux(af_level === 1.U, ppn_l2, ppn_l1)) // for l3, l2 and l1
      pg_base := MakeGPAddr(hgatp.ppn, getGVpnn(vpn, 3.U, mode = Sv48)) // for l3
      mem_addr := Mux(af_level === 3.U, pg_base, p_pte)
    } .otherwise {
      ppn := Mux(af_level === 1.U, ppn_l2, ppn_l1) // for l2 and l1
      pg_base := MakeGPAddr(hgatp.ppn, getGVpnn(vpn, 2.U, mode = Sv39))
      mem_addr := Mux(af_level === 2.U, pg_base, p_pte)
    }
  } else {
    ppn := Mux(af_level === 1.U, ppn_l2, ppn_l1) // for l2 and l1
    pg_base := MakeGPAddr(hgatp.ppn, getGVpnn(vpn, 2.U, mode = Sv39))
    mem_addr := Mux(af_level === 2.U, pg_base, p_pte)
  }

  // s/w registers, same idle-low request/response idiom as in PTW
  val s_pmp_check = RegInit(true.B)
  val s_mem_req = RegInit(true.B)
  val w_mem_resp = RegInit(true.B)
  val idle = RegInit(true.B)
  val mem_addr_update = RegInit(false.B)
  val finish = WireInit(false.B)
  val sent_to_pmp = !idle && (!s_pmp_check || mem_addr_update) && !finish
  val pageFault = pte.isGpf(level, mpbmte) || (!pte.isLeaf() && level === 0.U)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, sent_to_pmp)

  val ppn_af = pte.isAf()
  val find_pte = pte.isLeaf() || ppn_af || pageFault

  val resp_valid = !idle && mem_addr_update && ((w_mem_resp && find_pte) || (s_pmp_check && accessFault))
  val id = Reg(UInt(log2Up(l2tlbParams.llptwsize).W))
  val source = RegEnable(io.req.bits.source, io.req.fire)

  io.req.ready := idle
  val resp = Wire(new HptwResp())
  // accessFault > pageFault > ppn_af
  resp.apply(
    gpf = pageFault && !accessFault,
    gaf = accessFault || (ppn_af && !pageFault),
    level = Mux(accessFault, af_level, level),
    pte = pte,
    vpn = vpn,
    vmid = hgatp.vmid
  )
  io.resp.valid := resp_valid
  io.resp.bits.id := id
  io.resp.bits.resp := resp
  io.resp.bits.source := source

  io.pmp.req.valid := DontCare
  io.pmp.req.bits.addr := mem_addr
  io.pmp.req.bits.size := 3.U
  io.pmp.req.bits.cmd := TlbCmd.read

  io.mem.req.valid := !s_mem_req && !io.mem.mask && !accessFault && s_pmp_check
  io.mem.req.bits.addr := mem_addr
  io.mem.req.bits.id := HptwReqId.U(bMemID.W)
  io.mem.req.bits.hptw_bypassed := bypassed

  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source
  io.refill.req_info.s2xlate := onlyStage2

  when (idle) {
    when (io.req.fire) {
      bypassed := io.req.bits.bypassed
      idle := false.B
      gpaddr := Cat(io.req.bits.gvpn, 0.U(offLen.W))
      accessFault := false.B
      s_pmp_check := false.B
      id := io.req.bits.id
      req_ppn := io.req.bits.ppn
      if (EnableSv48) {
        when (mode === Sv48) {
          level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, Mux(io.req.bits.l3Hit.get, 2.U, 3.U)))
          af_level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, Mux(io.req.bits.l3Hit.get, 2.U, 3.U)))
          l3Hit := io.req.bits.l3Hit.get
        } .otherwise {
          level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, 2.U))
          af_level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, 2.U))
          l3Hit := false.B
        }
      } else {
        level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, 2.U))
        af_level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, 2.U))
        l3Hit := false.B
      }
      l2Hit := io.req.bits.l2Hit
      l1Hit := io.req.bits.l1Hit
    }
  }

  when (sent_to_pmp && !mem_addr_update) {
    s_mem_req := false.B
    s_pmp_check := true.B
  }

  when (accessFault && !idle) {
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    mem_addr_update := true.B
  }

  when (io.mem.req.fire) {
    s_mem_req := true.B
    w_mem_resp := false.B
  }

  when (io.mem.resp.fire && !w_mem_resp) {
    w_mem_resp := true.B
    af_level := af_level - 1.U
    mem_addr_update := true.B
  }

  when (mem_addr_update) {
    when (!(find_pte || accessFault)) {
      level := levelNext
      s_mem_req := false.B
      mem_addr_update := false.B
    } .elsewhen (resp_valid) {
      when (io.resp.fire) {
        idle := true.B
        mem_addr_update := false.B
        accessFault := false.B
      }
      finish := true.B
    }
  }
  when (flush) {
    idle := true.B
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    accessFault := false.B
    mem_addr_update := false.B
  }
}