/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import utility._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}

/** Page Table Walk is divided into two parts:
 *  1. PTW:   page walk for PDEs (all non-leaf entries), one level at a time
 *  2. LLPTW: page walk for PTEs (only the leaf 4KB entries), in parallel
 */

/** PTW : page table walker
 *  a finite state machine
 *  only handles the 1GB and 2MB page walk levels,
 *  in other words, every level except the last (leaf) one
 **/
class PTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val l3Hit = if (EnableSv48) Some(new Bool()) else None
    val l2Hit = Bool()
    val ppn = UInt(ptePPNLen.W)
    val stage1Hit = Bool()
    val stage1 = new PtwMergeResp
  }))
  val resp = DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val s2xlate = UInt(2.W)
    val resp = new PtwMergeResp
    val h_resp = new HptwResp
  })

  val llptw = DecoupledIO(new LLPTWInBundle())
  // NOTE: llptw changed from "connect to llptw" to "connect to page cache"
  // to avoid a corner case that caused duplicate entries

  val hptw = new Bundle {
    val req = DecoupledIO(new Bundle {
      val source = UInt(bSourceWidth.W)
      val id = UInt(log2Up(l2tlbParams.llptwsize).W)
      val gvpn = UInt(ptePPNLen.W)
    })
    val resp = Flipped(Valid(new Bundle {
      val h_resp = Output(new HptwResp)
    }))
  }
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }

  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level + 1).W)
  })
}

class PTW()(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new PTWIO)
  val sfence = io.sfence
  val mem = io.mem
  val req_s2xlate = Reg(UInt(2.W))
  val enableS2xlate = req_s2xlate =/= noS2xlate
  val onlyS1xlate = req_s2xlate === onlyStage1
  val onlyS2xlate = req_s2xlate === onlyStage2
  val satp = Wire(new TlbSatpBundle())
  when (io.req.fire) {
    satp := Mux(io.req.bits.req_info.s2xlate =/= noS2xlate, io.csr.vsatp, io.csr.satp)
  } .otherwise {
    satp := Mux(enableS2xlate, io.csr.vsatp, io.csr.satp)
  }
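  // NOTE (added summary): under virtualization, stage-1 PTEs belong to the
  // guest page table, so the Svpbmt enable below is taken from hPBMTE
  // (presumably henvcfg.PBMTE) rather than mPBMTE (presumably menvcfg.PBMTE).
  // The exact CSR mapping is an assumption; the selection logic itself is not.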
  val s1Pbmte = Mux(req_s2xlate =/= noS2xlate, io.csr.hPBMTE, io.csr.mPBMTE)

  val mode = satp.mode
  val hgatp = io.csr.hgatp
  val flush = io.sfence.valid || io.csr.satp.changed || io.csr.vsatp.changed || io.csr.hgatp.changed
  val s2xlate = enableS2xlate && !onlyS1xlate
  val level = RegInit(3.U(log2Up(Level + 1).W))
  val af_level = RegInit(3.U(log2Up(Level + 1).W)) // access fault returns this level
  val gpf_level = RegInit(3.U(log2Up(Level + 1).W))
  val ppn = Reg(UInt(ptePPNLen.W))
  val vpn = Reg(UInt(vpnLen.W)) // vpn or gvpn (onlyS2xlate)
  val levelNext = level - 1.U
  val l3Hit = Reg(Bool())
  val l2Hit = Reg(Bool())
  val pte = mem.resp.bits.asTypeOf(new PteBundle())

  // s/w registers
  val s_pmp_check = RegInit(true.B)
  val s_mem_req = RegInit(true.B)
  val s_llptw_req = RegInit(true.B)
  val w_mem_resp = RegInit(true.B)
  val s_hptw_req = RegInit(true.B)
  val w_hptw_resp = RegInit(true.B)
  val s_last_hptw_req = RegInit(true.B)
  val w_last_hptw_resp = RegInit(true.B)
  // for updating "level"
  val mem_addr_update = RegInit(false.B)

  val idle = RegInit(true.B)
  val finish = WireInit(false.B)
  val sent_to_pmp = idle === false.B && (s_pmp_check === false.B || mem_addr_update) && !finish

  val pageFault = pte.isPf(level, s1Pbmte)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, false.B, sent_to_pmp)

  val hptw_pageFault = RegInit(false.B)
  val hptw_accessFault = RegInit(false.B)
  val need_last_s2xlate = RegInit(false.B)
  val stage1Hit = RegEnable(io.req.bits.stage1Hit, io.req.fire)
  val stage1 = RegEnable(io.req.bits.stage1, io.req.fire)
  val hptw_resp_stage2 = Reg(Bool())

  // In two-stage address translation, the stage-1 ppn is a vpn for the host, so there is no need to check ppn_high
  val ppn_af = Mux(enableS2xlate, Mux(onlyS1xlate, pte.isAf(), false.B), pte.isAf())
  val find_pte = pte.isLeaf() || ppn_af || pageFault
  val to_find_pte = level === 1.U && find_pte === false.B
  val source = RegEnable(io.req.bits.req_info.source, io.req.fire)

  val l3addr = Wire(UInt(PAddrBits.W))
  val l2addr = Wire(UInt(PAddrBits.W))
  val l1addr = Wire(UInt(PAddrBits.W))
  val mem_addr = Wire(UInt(PAddrBits.W))

  l3addr := MakeAddr(satp.ppn, getVpnn(vpn, 3))
  if (EnableSv48) {
    when (mode === Sv48) {
      l2addr := MakeAddr(Mux(l3Hit, ppn, pte.getPPN()), getVpnn(vpn, 2))
    } .otherwise {
      l2addr := MakeAddr(satp.ppn, getVpnn(vpn, 2))
    }
  } else {
    l2addr := MakeAddr(satp.ppn, getVpnn(vpn, 2))
  }
  l1addr := MakeAddr(Mux(l2Hit, ppn, pte.getPPN()), getVpnn(vpn, 1))
  mem_addr := Mux(af_level === 3.U, l3addr, Mux(af_level === 2.U, l2addr, l1addr))

  val hptw_resp = Reg(new HptwResp)
  val full_gvpn = Reg(UInt(ptePPNLen.W))
  val gpaddr = MuxCase(mem_addr, Seq(
    (stage1Hit || onlyS2xlate) -> Cat(full_gvpn, 0.U(offLen.W)),
    !s_last_hptw_req -> Cat(MuxLookup(level, pte.getPPN())(Seq(
      3.U -> Cat(pte.getPPN()(ptePPNLen - 1, vpnnLen * 3), vpn(vpnnLen * 3 - 1, 0)),
      2.U -> Cat(pte.getPPN()(ptePPNLen - 1, vpnnLen * 2), vpn(vpnnLen * 2 - 1, 0)),
      1.U -> Cat(pte.getPPN()(ptePPNLen - 1, vpnnLen), vpn(vpnnLen - 1, 0)))),
      0.U(offLen.W))
  ))
  val gvpn_gpf =
    !(hptw_pageFault || hptw_accessFault) &&
    Mux(s2xlate && io.csr.hgatp.mode === Sv39x4,
      full_gvpn(ptePPNLen - 1, GPAddrBitsSv39x4 - offLen) =/= 0.U,
      Mux(s2xlate && io.csr.hgatp.mode === Sv48x4,
        full_gvpn(ptePPNLen - 1, GPAddrBitsSv48x4 - offLen) =/= 0.U,
        false.B))
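  // Background for the check above: a guest physical address is 2 bits wider
  // than the corresponding virtual address (Sv39x4: 41 bits, Sv48x4: 50 bits),
  // so any full_gvpn bits above that width mean the GPA is out of range and
  // must raise a guest page fault.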
  val guestFault = hptw_pageFault || hptw_accessFault || gvpn_gpf
  val hpaddr = Cat(hptw_resp.genPPNS2(get_pn(gpaddr)), get_off(gpaddr))
  val fake_h_resp = 0.U.asTypeOf(new HptwResp)
  fake_h_resp.entry.tag := get_pn(gpaddr)
  fake_h_resp.entry.vmid.map(_ := io.csr.hgatp.vmid)
  fake_h_resp.gpf := true.B

  val pte_valid = RegInit(false.B) // avoid an l1tlb pf from stage 1 when a gpf happens in the first s2xlate in PTW
  val fake_pte = 0.U.asTypeOf(new PteBundle())
  fake_pte.perm.v := false.B // tell L1TLB this is a fake pte
  fake_pte.ppn := ppn(ppnLen - 1, 0)
  fake_pte.ppn_high := ppn(ptePPNLen - 1, ppnLen)

  io.req.ready := idle
  val ptw_resp = Wire(new PtwMergeResp)
  ptw_resp.apply(
    Mux(pte_valid, pageFault && !accessFault, false.B),
    accessFault || (ppn_af && !(pte_valid && (pageFault || guestFault))),
    Mux(accessFault, af_level, Mux(guestFault, gpf_level, level)),
    Mux(pte_valid, pte, fake_pte),
    vpn, satp.asid, hgatp.vmid, vpn(sectortlbwidth - 1, 0),
    not_super = false, not_merge = false)

  val normal_resp = idle === false.B && mem_addr_update && !need_last_s2xlate &&
    (guestFault || (w_mem_resp && find_pte) || (s_pmp_check && accessFault) || onlyS2xlate)
  val stageHit_resp = idle === false.B && hptw_resp_stage2
  io.resp.valid := Mux(stage1Hit, stageHit_resp, normal_resp)
  io.resp.bits.source := source
  io.resp.bits.resp := Mux(stage1Hit || (l3Hit || l2Hit) && guestFault && !pte_valid, stage1, ptw_resp)
  io.resp.bits.h_resp := Mux(gvpn_gpf, fake_h_resp, hptw_resp)
  io.resp.bits.s2xlate := req_s2xlate

  io.llptw.valid := s_llptw_req === false.B && to_find_pte && !accessFault && !guestFault
  io.llptw.bits.req_info.source := source
  io.llptw.bits.req_info.vpn := vpn
  io.llptw.bits.req_info.s2xlate := req_s2xlate
  io.llptw.bits.ppn := DontCare

  io.pmp.req.valid := DontCare // same cycle, do not use valid
  io.pmp.req.bits.addr := Mux(s2xlate, hpaddr, mem_addr)
  io.pmp.req.bits.size := 3.U // TODO: fix it
  io.pmp.req.bits.cmd := TlbCmd.read

  mem.req.valid := s_mem_req === false.B && !mem.mask && !accessFault && s_pmp_check
  mem.req.bits.addr := Mux(s2xlate, hpaddr, mem_addr)
  mem.req.bits.id := FsmReqID.U(bMemID.W)
  mem.req.bits.hptw_bypassed := false.B

  io.refill.req_info.s2xlate := req_s2xlate
  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source

  io.hptw.req.valid := !s_hptw_req || !s_last_hptw_req
  io.hptw.req.bits.id := FsmReqID.U(bMemID.W)
  io.hptw.req.bits.gvpn := get_pn(gpaddr)
  io.hptw.req.bits.source := source

  when (io.req.fire && io.req.bits.stage1Hit) {
    idle := false.B
    req_s2xlate := io.req.bits.req_info.s2xlate
    s_last_hptw_req := false.B
    hptw_resp_stage2 := false.B
    need_last_s2xlate := false.B
    hptw_pageFault := false.B
    hptw_accessFault := false.B
    full_gvpn := io.req.bits.stage1.genPPN()
  }

  when (io.resp.fire && stage1Hit) {
    idle := true.B
  }
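  // For a new walk (below), the starting level depends on how much the page
  // cache already resolved: l2Hit starts at level 1 (the last non-leaf level),
  // l3Hit (Sv48 only) at level 2, and otherwise the walk starts from the root
  // page table pointed to by satp.ppn.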
  when (io.req.fire && !io.req.bits.stage1Hit) {
    val req = io.req.bits
    val gvpn_wire = Wire(UInt(ptePPNLen.W))
    if (EnableSv48) {
      when (mode === Sv48) {
        level := Mux(req.l2Hit, 1.U, Mux(req.l3Hit.get, 2.U, 3.U))
        af_level := Mux(req.l2Hit, 1.U, Mux(req.l3Hit.get, 2.U, 3.U))
        gpf_level := Mux(req.l2Hit, 2.U, Mux(req.l3Hit.get, 3.U, 0.U))
        ppn := Mux(req.l2Hit || req.l3Hit.get, io.req.bits.ppn, satp.ppn)
        l3Hit := req.l3Hit.get
        gvpn_wire := Mux(req.l2Hit || req.l3Hit.get, io.req.bits.ppn, satp.ppn)
      } .otherwise {
        level := Mux(req.l2Hit, 1.U, 2.U)
        af_level := Mux(req.l2Hit, 1.U, 2.U)
        gpf_level := 0.U
        ppn := Mux(req.l2Hit, io.req.bits.ppn, satp.ppn)
        l3Hit := false.B
        gvpn_wire := Mux(req.l2Hit, io.req.bits.ppn, satp.ppn)
      }
    } else {
      level := Mux(req.l2Hit, 1.U, 2.U)
      af_level := Mux(req.l2Hit, 1.U, 2.U)
      gpf_level := 0.U
      ppn := Mux(req.l2Hit, io.req.bits.ppn, satp.ppn)
      l3Hit := false.B
      gvpn_wire := Mux(req.l2Hit, io.req.bits.ppn, satp.ppn)
    }
    vpn := io.req.bits.req_info.vpn
    l2Hit := req.l2Hit
    accessFault := false.B
    idle := false.B
    hptw_pageFault := false.B
    hptw_accessFault := false.B
    pte_valid := false.B
    req_s2xlate := io.req.bits.req_info.s2xlate
    when (io.req.bits.req_info.s2xlate === onlyStage2) {
      full_gvpn := io.req.bits.req_info.vpn
      val onlys2_gpaddr = Cat(io.req.bits.req_info.vpn, 0.U(offLen.W)) // is 50 bits; no need to check high bits when sv48x4 is enabled
      val check_gpa_high_fail = Mux(io.req.bits.req_info.s2xlate === onlyStage2 && io.csr.hgatp.mode === Sv39x4, onlys2_gpaddr(onlys2_gpaddr.getWidth - 1, GPAddrBitsSv39x4) =/= 0.U, false.B)
      need_last_s2xlate := false.B
      when (check_gpa_high_fail) {
        mem_addr_update := true.B
      }.otherwise {
        s_last_hptw_req := false.B
      }
    }.elsewhen (io.req.bits.req_info.s2xlate === allStage) {
      full_gvpn := 0.U
      val allstage_gpaddr = Cat(gvpn_wire, 0.U(offLen.W))
      val check_gpa_high_fail = Mux(io.csr.hgatp.mode === Sv39x4, allstage_gpaddr(allstage_gpaddr.getWidth - 1, GPAddrBitsSv39x4) =/= 0.U, Mux(io.csr.hgatp.mode === Sv48x4, allstage_gpaddr(allstage_gpaddr.getWidth - 1, GPAddrBitsSv48x4) =/= 0.U, false.B))
      when (check_gpa_high_fail) {
        mem_addr_update := true.B
      }.otherwise {
        need_last_s2xlate := true.B
        s_hptw_req := false.B
      }
    }.otherwise {
      full_gvpn := 0.U
      need_last_s2xlate := false.B
      s_pmp_check := false.B
    }
  }

  when (io.hptw.req.fire && s_hptw_req === false.B) {
    s_hptw_req := true.B
    w_hptw_resp := false.B
  }

  when (io.hptw.resp.fire && w_hptw_resp === false.B) {
    w_hptw_resp := true.B
    val g_perm_fail = !io.hptw.resp.bits.h_resp.gaf &&
      (!io.hptw.resp.bits.h_resp.entry.perm.get.r && !(io.csr.priv.mxr && io.hptw.resp.bits.h_resp.entry.perm.get.x))
    hptw_pageFault := io.hptw.resp.bits.h_resp.gpf || g_perm_fail
    hptw_accessFault := io.hptw.resp.bits.h_resp.gaf
    hptw_resp := io.hptw.resp.bits.h_resp
    hptw_resp.gpf := io.hptw.resp.bits.h_resp.gpf || g_perm_fail
    when (!(g_perm_fail || io.hptw.resp.bits.h_resp.gpf || io.hptw.resp.bits.h_resp.gaf)) {
      s_pmp_check := false.B
    }.otherwise {
      mem_addr_update := true.B
      need_last_s2xlate := false.B
    }
  }

  when (io.hptw.req.fire && s_last_hptw_req === false.B) {
    w_last_hptw_resp := false.B
    s_last_hptw_req := true.B
  }

  when (io.hptw.resp.fire && w_last_hptw_resp === false.B && stage1Hit) {
    w_last_hptw_resp := true.B
    hptw_resp_stage2 := true.B
    hptw_resp := io.hptw.resp.bits.h_resp
  }

  when (io.hptw.resp.fire && w_last_hptw_resp === false.B && !stage1Hit) {
    hptw_pageFault := io.hptw.resp.bits.h_resp.gpf
    hptw_accessFault := io.hptw.resp.bits.h_resp.gaf
    hptw_resp := io.hptw.resp.bits.h_resp
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
  }

  when (sent_to_pmp && mem_addr_update === false.B) {
    s_mem_req := false.B
    s_pmp_check := true.B
  }
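  // On an access fault or a guest fault, all handshake flags are forced back
  // to their idle (true) values and mem_addr_update is raised, so the FSM
  // falls through to the response path instead of issuing further requests.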
  when (accessFault && idle === false.B) {
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    s_llptw_req := true.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
    need_last_s2xlate := false.B
  }

  when (guestFault && idle === false.B) {
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    s_llptw_req := true.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
    need_last_s2xlate := false.B
  }

  when (mem.req.fire) {
    s_mem_req := true.B
    w_mem_resp := false.B
  }

  when (mem.resp.fire && w_mem_resp === false.B) {
    w_mem_resp := true.B
    af_level := af_level - 1.U
    s_llptw_req := false.B
    mem_addr_update := true.B
    gpf_level := Mux(mode === Sv39 && !pte_valid && !(l3Hit || l2Hit), gpf_level - 2.U, gpf_level - 1.U)
    pte_valid := true.B
    full_gvpn := pte.getPPN()
  }

  when (mem_addr_update) {
    when (level >= 2.U && !onlyS2xlate && !(guestFault || find_pte || accessFault)) {
      level := levelNext
      when (s2xlate) {
        s_hptw_req := false.B
      }.otherwise {
        s_mem_req := false.B
      }
      s_llptw_req := true.B
      mem_addr_update := false.B
    }.elsewhen (io.llptw.valid) {
      when (io.llptw.fire) {
        idle := true.B
        s_llptw_req := true.B
        mem_addr_update := false.B
        need_last_s2xlate := false.B
      }
      finish := true.B
    }.elsewhen (s2xlate && need_last_s2xlate === true.B) {
      need_last_s2xlate := false.B
      when (!(guestFault || accessFault || pageFault || ppn_af)) {
        s_last_hptw_req := false.B
        mem_addr_update := false.B
      }
    }.elsewhen (io.resp.valid) {
      when (io.resp.fire) {
        idle := true.B
        s_llptw_req := true.B
        mem_addr_update := false.B
        accessFault := false.B
      }
      finish := true.B
    }
  }

  when (flush) {
    idle := true.B
    s_pmp_check := true.B
    s_mem_req := true.B
    s_llptw_req := true.B
    w_mem_resp := true.B
    accessFault := false.B
    mem_addr_update := false.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
  }

  XSDebug(p"[ptw] level:${level} notFound:${pageFault}\n")

  // perf
  XSPerfAccumulate("fsm_count", io.req.fire)
  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"fsm_count_source${i}", io.req.fire && io.req.bits.req_info.source === i.U)
  }
  XSPerfAccumulate("fsm_busy", !idle)
  XSPerfAccumulate("fsm_idle", idle)
  XSPerfAccumulate("resp_blocked", io.resp.valid && !io.resp.ready)
  XSPerfAccumulate("ptw_ppn_af", io.resp.fire && ppn_af)
  XSPerfAccumulate("mem_count", mem.req.fire)
  XSPerfAccumulate("mem_cycle", BoolStopWatch(mem.req.fire, mem.resp.fire, true))
  XSPerfAccumulate("mem_blocked", mem.req.valid && !mem.req.ready)

  val perfEvents = Seq(
    ("fsm_count         ", io.req.fire                                     ),
    ("fsm_busy          ", !idle                                           ),
    ("fsm_idle          ", idle                                            ),
    ("resp_blocked      ", io.resp.valid && !io.resp.ready                 ),
    ("mem_count         ", mem.req.fire                                    ),
    ("mem_cycle         ", BoolStopWatch(mem.req.fire, mem.resp.fire, true)),
    ("mem_blocked       ", mem.req.valid && !mem.req.ready                 ),
  )
  generatePerfEvent()
}
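// A minimal walk-through of the FSM above (illustrative only; Sv39, no
// virtualization, no partial page-cache hit):
//   level 2: mem_addr = satp.ppn << 12 | vpn[26:18] << 3   (root fetch)
//   level 1: mem_addr = pte.ppn  << 12 | vpn[17:9]  << 3   (non-leaf PTE)
//   level 0: not walked here; once the level-1 fetch returns a non-leaf PTE,
//            the 4KB leaf lookup is handed to LLPTW through io.llptw.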
/*========================= LLPTW ==============================*/

/** LLPTW : Last Level Page Table Walker
 *  the page walker that performs only the 4KB (last-level) page walks
 **/

class LLPTWInBundle(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = Output(new L2TlbInnerBundle())
  val ppn = Output(UInt(ptePPNLen.W))
}

class LLPTWIO(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val in = Flipped(DecoupledIO(new LLPTWInBundle()))
  val out = DecoupledIO(new Bundle {
    val req_info = Output(new L2TlbInnerBundle())
    val id = Output(UInt(bMemID.W))
    val h_resp = Output(new HptwResp)
    val first_s2xlate_fault = Output(Bool()) // whether a pf/af occurred in the first stage-2 translation
    val af = Output(Bool())
  })
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
      val value = Output(UInt(blockBits.W))
    }))
    val enq_ptr = Output(UInt(log2Ceil(l2tlbParams.llptwsize).W))
    val buffer_it = Output(Vec(l2tlbParams.llptwsize, Bool()))
    val refill = Output(new L2TlbInnerBundle())
    val req_mask = Input(Vec(l2tlbParams.llptwsize, Bool()))
    val flush_latch = Input(Vec(l2tlbParams.llptwsize, Bool()))
  }
  val cache = DecoupledIO(new L2TlbInnerBundle())
  val pmp = new Bundle {
    val req = Valid(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
  val hptw = new Bundle {
    val req = DecoupledIO(new Bundle {
      val source = UInt(bSourceWidth.W)
      val id = UInt(log2Up(l2tlbParams.llptwsize).W)
      val gvpn = UInt(ptePPNLen.W)
    })
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
      val h_resp = Output(new HptwResp)
    }))
  }
}

class LLPTWEntry(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = new L2TlbInnerBundle()
  val ppn = UInt(ptePPNLen.W)
  val wait_id = UInt(log2Up(l2tlbParams.llptwsize).W)
  val af = Bool()
  val hptw_resp = new HptwResp()
  val first_s2xlate_fault = Output(Bool())
}


class LLPTW(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new LLPTWIO())
  val enableS2xlate = io.in.bits.req_info.s2xlate =/= noS2xlate
  val satp = Mux(enableS2xlate, io.csr.vsatp, io.csr.satp)
  val s1Pbmte = Mux(enableS2xlate, io.csr.hPBMTE, io.csr.mPBMTE)

  val flush = io.sfence.valid || io.csr.satp.changed || io.csr.vsatp.changed || io.csr.hgatp.changed
  val entries = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(0.U.asTypeOf(new LLPTWEntry()))))
  val state_idle :: state_hptw_req :: state_hptw_resp :: state_addr_check :: state_mem_req :: state_mem_waiting :: state_mem_out :: state_last_hptw_req :: state_last_hptw_resp :: state_cache :: Nil = Enum(10)
  val state = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(state_idle)))

  val is_emptys = state.map(_ === state_idle)
  val is_mems = state.map(_ === state_mem_req)
  val is_waiting = state.map(_ === state_mem_waiting)
  val is_having = state.map(_ === state_mem_out)
  val is_cache = state.map(_ === state_cache)
  val is_hptw_req = state.map(_ === state_hptw_req)
  val is_last_hptw_req = state.map(_ === state_last_hptw_req)
  val is_hptw_resp = state.map(_ === state_hptw_resp)
  val is_last_hptw_resp = state.map(_ === state_last_hptw_resp)

  val full = !ParallelOR(is_emptys).asBool
  val enq_ptr = ParallelPriorityEncoder(is_emptys)

  val mem_ptr = ParallelPriorityEncoder(is_having) // TODO: optimize timing, bad: entries -> ptr -> entry
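  // One round-robin arbiter below picks which state_mem_req entry issues the
  // next memory request; hyper_arb1/hyper_arb2 serialize the first and the
  // last stage-2 (HPTW) requests so that only one is in flight at a time.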
  val mem_arb = Module(new RRArbiterInit(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    mem_arb.io.in(i).bits := entries(i)
    mem_arb.io.in(i).valid := is_mems(i) && !io.mem.req_mask(i)
  }

  // process hptw requests serially
  val hyper_arb1 = Module(new RRArbiterInit(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    hyper_arb1.io.in(i).bits := entries(i)
    hyper_arb1.io.in(i).valid := is_hptw_req(i) && !(Cat(is_hptw_resp).orR) && !(Cat(is_last_hptw_resp).orR)
  }
  val hyper_arb2 = Module(new RRArbiterInit(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    hyper_arb2.io.in(i).bits := entries(i)
    hyper_arb2.io.in(i).valid := is_last_hptw_req(i) && !(Cat(is_hptw_resp).orR) && !(Cat(is_last_hptw_resp).orR)
  }

  val cache_ptr = ParallelMux(is_cache, (0 until l2tlbParams.llptwsize).map(_.U(log2Up(l2tlbParams.llptwsize).W)))

  // duplicate req
  // to_wait: wait for the preceding (dup) entry's mem access; set to state_mem_waiting
  // to_cache: the preceding entry's data just came back; set to state_cache
  val dup_vec = state.indices.map(i =>
    dup(io.in.bits.req_info.vpn, entries(i).req_info.vpn) && io.in.bits.req_info.s2xlate === entries(i).req_info.s2xlate
  )
  val dup_req_fire = mem_arb.io.out.fire && dup(io.in.bits.req_info.vpn, mem_arb.io.out.bits.req_info.vpn) && io.in.bits.req_info.s2xlate === mem_arb.io.out.bits.req_info.s2xlate // dup with the entry whose req fires now
  val dup_vec_wait = dup_vec.zip(is_waiting).map{case (d, w) => d && w} // dup with "mem_waiting" entries that have already sent a mem req
  val dup_vec_having = dup_vec.zipWithIndex.map{case (d, i) => d && is_having(i)} // dup with the "mem_out" entry that just received its data
  val dup_vec_last_hptw = dup_vec.zipWithIndex.map{case (d, i) => d && (is_last_hptw_req(i) || is_last_hptw_resp(i))}
  val wait_id = Mux(dup_req_fire, mem_arb.io.chosen, ParallelMux(dup_vec_wait zip entries.map(_.wait_id)))
  val dup_wait_resp = io.mem.resp.fire && VecInit(dup_vec_wait)(io.mem.resp.bits.id) && !io.mem.flush_latch(io.mem.resp.bits.id) // dup with the entry whose data arrives next cycle
  val to_wait = Cat(dup_vec_wait).orR || dup_req_fire
  val to_mem_out = dup_wait_resp && ((entries(io.mem.resp.bits.id).req_info.s2xlate === noS2xlate) || (entries(io.mem.resp.bits.id).req_info.s2xlate === onlyStage1))
  val to_cache = Cat(dup_vec_having).orR || Cat(dup_vec_last_hptw).orR
  val to_hptw_req = io.in.bits.req_info.s2xlate === allStage
  val to_last_hptw_req = dup_wait_resp && entries(io.mem.resp.bits.id).req_info.s2xlate === allStage
  val last_hptw_req_id = io.mem.resp.bits.id
  val req_paddr = MakeAddr(io.in.bits.ppn(ppnLen - 1, 0), getVpnn(io.in.bits.req_info.vpn, 0))
  val req_hpaddr = MakeAddr(entries(last_hptw_req_id).hptw_resp.genPPNS2(get_pn(req_paddr)), getVpnn(io.in.bits.req_info.vpn, 0))
  val index = Mux(entries(last_hptw_req_id).req_info.s2xlate === allStage, req_hpaddr, req_paddr)(log2Up(l2tlbParams.blockBytes) - 1, log2Up(XLEN / 8))
  val last_hptw_req_ppn = io.mem.resp.bits.value.asTypeOf(Vec(blockBits / XLEN, new PteBundle()))(index).getPPN()
  XSError(RegNext(dup_req_fire && Cat(dup_vec_wait).orR, init = false.B), "mem req but some entries already waiting, should not happen")

  XSError(io.in.fire && ((to_mem_out && to_cache) || (to_wait && to_cache)), "llptw enq, to cache conflict with to mem")
  val mem_resp_hit = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(false.B)))
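  // Enqueue-state priority (the MuxCase below, highest first): a duplicate
  // whose mem data arrives right now goes straight to mem_out/last_hptw_req;
  // a duplicate of an in-flight request waits; a duplicate of a finished one
  // re-reads the page cache; a fresh two-stage request starts with an HPTW
  // lookup; everything else goes through the PMP address check first.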
  val enq_state_normal = MuxCase(state_addr_check, Seq(
    to_mem_out -> state_mem_out, // same as the cases below, but the mem resp arrives right now
    to_last_hptw_req -> state_last_hptw_req,
    to_wait -> state_mem_waiting,
    to_cache -> state_cache,
    to_hptw_req -> state_hptw_req
  ))
  val enq_state = Mux(from_pre(io.in.bits.req_info.source) && enq_state_normal =/= state_addr_check, state_idle, enq_state_normal)
  when (io.in.fire) {
    // if a prefetch req does not need mem access, just give it up;
    // so there will be at most 1 + FilterSize entries that need to re-access the page cache,
    // and 2 + FilterSize is enough to avoid deadlock
    state(enq_ptr) := enq_state
    entries(enq_ptr).req_info := io.in.bits.req_info
    entries(enq_ptr).ppn := Mux(to_last_hptw_req, last_hptw_req_ppn, io.in.bits.ppn)
    entries(enq_ptr).wait_id := Mux(to_wait, wait_id, enq_ptr)
    entries(enq_ptr).af := false.B
    entries(enq_ptr).hptw_resp := Mux(to_last_hptw_req, entries(last_hptw_req_id).hptw_resp, Mux(to_wait, entries(wait_id).hptw_resp, entries(enq_ptr).hptw_resp))
    entries(enq_ptr).first_s2xlate_fault := false.B
    mem_resp_hit(enq_ptr) := to_mem_out || to_last_hptw_req
  }

  val enq_ptr_reg = RegNext(enq_ptr)
  val need_addr_check = GatedValidRegNext(enq_state === state_addr_check && io.in.fire && !flush)

  val hasHptwResp = ParallelOR(state.map(_ === state_hptw_resp)).asBool
  val hptw_resp_ptr_reg = RegNext(io.hptw.resp.bits.id)
  val hptw_need_addr_check = RegNext(hasHptwResp && io.hptw.resp.fire && !flush) && state(hptw_resp_ptr_reg) === state_addr_check

  val ptes = io.mem.resp.bits.value.asTypeOf(Vec(blockBits / XLEN, new PteBundle()))
  val gpaddr = MakeGPAddr(entries(hptw_resp_ptr_reg).ppn, getVpnn(entries(hptw_resp_ptr_reg).req_info.vpn, 0))
  val hptw_resp = entries(hptw_resp_ptr_reg).hptw_resp
  val hpaddr = Cat(hptw_resp.genPPNS2(get_pn(gpaddr)), get_off(gpaddr))
  val addr = RegEnable(MakeAddr(io.in.bits.ppn(ppnLen - 1, 0), getVpnn(io.in.bits.req_info.vpn, 0)), io.in.fire)
  io.pmp.req.valid := need_addr_check || hptw_need_addr_check
  io.pmp.req.bits.addr := Mux(hptw_need_addr_check, hpaddr, addr)
  io.pmp.req.bits.cmd := TlbCmd.read
  io.pmp.req.bits.size := 3.U // TODO: fix it
  val pmp_resp_valid = io.pmp.req.valid // same cycle
  when (pmp_resp_valid) {
    // NOTE: when the pmp resp arrives but the state is not addr_check, the entry is a dup of
    // another entry and its state has already been changed; when it dups with the req-ing
    // entry it was set to mem_waiting (above), and ld must be false, so dontcare
    val ptr = Mux(hptw_need_addr_check, hptw_resp_ptr_reg, enq_ptr_reg)
    val accessFault = io.pmp.resp.ld || io.pmp.resp.mmio
    entries(ptr).af := accessFault
    state(ptr) := Mux(accessFault, state_mem_out, state_mem_req)
  }

  when (mem_arb.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) =/= state_idle && state(i) =/= state_mem_out && state(i) =/= state_last_hptw_req && state(i) =/= state_last_hptw_resp
        && entries(i).req_info.s2xlate === mem_arb.io.out.bits.req_info.s2xlate
        && dup(entries(i).req_info.vpn, mem_arb.io.out.bits.req_info.vpn)) {
        // NOTE: "dup enq sets state to mem_wait" -> "sending a req sets other dup entries to mem_wait"
        state(i) := state_mem_waiting
        entries(i).hptw_resp := entries(mem_arb.io.chosen).hptw_resp
        entries(i).wait_id := mem_arb.io.chosen
      }
    }
  }
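  // On a mem response (below), each entry waiting on that wait_id picks its
  // PTE out of the refilled block; an allStage entry whose stage-1 PTE is a
  // valid leaf still needs a final G-stage walk for the leaf PPN, so it moves
  // to state_last_hptw_req instead of state_mem_out.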
  when (io.mem.resp.fire) {
    state.indices.map{i =>
      when (state(i) === state_mem_waiting && io.mem.resp.bits.id === entries(i).wait_id) {
        val req_paddr = MakeAddr(entries(i).ppn, getVpnn(entries(i).req_info.vpn, 0))
        val req_hpaddr = MakeAddr(entries(i).hptw_resp.genPPNS2(get_pn(req_paddr)), getVpnn(entries(i).req_info.vpn, 0))
        val index = Mux(entries(i).req_info.s2xlate === allStage, req_hpaddr, req_paddr)(log2Up(l2tlbParams.blockBytes) - 1, log2Up(XLEN / 8))
        state(i) := Mux(entries(i).req_info.s2xlate === allStage && !(ptes(index).isPf(0.U, s1Pbmte) || !ptes(index).isLeaf() || ptes(index).isAf() || ptes(index).isStage1Gpf(io.csr.vsatp.mode)),
          state_last_hptw_req, state_mem_out)
        mem_resp_hit(i) := true.B
        entries(i).ppn := ptes(index).getPPN() // for the last stage-2 translation
        entries(i).hptw_resp.gpf := Mux(entries(i).req_info.s2xlate === allStage, ptes(index).isStage1Gpf(io.csr.vsatp.mode), false.B)
      }
    }
  }

  when (hyper_arb1.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) === state_hptw_req && entries(i).ppn === hyper_arb1.io.out.bits.ppn && entries(i).req_info.s2xlate === allStage && hyper_arb1.io.chosen === i.U) {
        state(i) := state_hptw_resp
        entries(i).wait_id := hyper_arb1.io.chosen
      }
    }
  }

  when (hyper_arb2.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) === state_last_hptw_req && entries(i).ppn === hyper_arb2.io.out.bits.ppn && entries(i).req_info.s2xlate === allStage && hyper_arb2.io.chosen === i.U) {
        state(i) := state_last_hptw_resp
        entries(i).wait_id := hyper_arb2.io.chosen
      }
    }
  }

  when (io.hptw.resp.fire) {
    for (i <- state.indices) {
      when (state(i) === state_hptw_resp && io.hptw.resp.bits.id === entries(i).wait_id && io.hptw.resp.bits.h_resp.entry.tag === entries(i).ppn) {
        val check_g_perm_fail = !io.hptw.resp.bits.h_resp.gaf && (!io.hptw.resp.bits.h_resp.entry.perm.get.r && !(io.csr.priv.mxr && io.hptw.resp.bits.h_resp.entry.perm.get.x))
        when (check_g_perm_fail || io.hptw.resp.bits.h_resp.gaf || io.hptw.resp.bits.h_resp.gpf) {
          state(i) := state_mem_out
          entries(i).hptw_resp := io.hptw.resp.bits.h_resp
          entries(i).hptw_resp.gpf := io.hptw.resp.bits.h_resp.gpf || check_g_perm_fail
          entries(i).first_s2xlate_fault := io.hptw.resp.bits.h_resp.gaf || io.hptw.resp.bits.h_resp.gpf
        }.otherwise { // change the entry that is waiting for the hptw resp
          val need_to_waiting_vec = state.indices.map(i => state(i) === state_mem_waiting && dup(entries(i).req_info.vpn, entries(io.hptw.resp.bits.id).req_info.vpn))
          val waiting_index = ParallelMux(need_to_waiting_vec zip entries.map(_.wait_id))
          state(i) := Mux(Cat(need_to_waiting_vec).orR, state_mem_waiting, state_addr_check)
          entries(i).hptw_resp := io.hptw.resp.bits.h_resp
          entries(i).wait_id := Mux(Cat(need_to_waiting_vec).orR, waiting_index, entries(i).wait_id)
          // TODO: change the entry that has the same hptw req
        }
      }
      when (state(i) === state_last_hptw_resp && io.hptw.resp.bits.id === entries(i).wait_id && io.hptw.resp.bits.h_resp.entry.tag === entries(i).ppn) {
        state(i) := state_mem_out
        entries(i).hptw_resp := io.hptw.resp.bits.h_resp
        // TODO: change the entry that has the same hptw req
      }
    }
  }
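  // Two dequeue paths below: mem_out entries leave through io.out toward the
  // requester, while state_cache entries re-enter the page cache through
  // io.cache; both return the slot to state_idle.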
  when (io.out.fire) {
    assert(state(mem_ptr) === state_mem_out)
    state(mem_ptr) := state_idle
  }
  mem_resp_hit.map(a => when (a) { a := false.B })

  when (io.cache.fire) {
    state(cache_ptr) := state_idle
  }
  XSError(io.out.fire && io.cache.fire && (mem_ptr === cache_ptr), "mem resp and cache fire at the same time at the same entry")

  when (flush) {
    state.map(_ := state_idle)
  }

  io.in.ready := !full

  io.out.valid := ParallelOR(is_having).asBool
  io.out.bits.req_info := entries(mem_ptr).req_info
  io.out.bits.id := mem_ptr
  io.out.bits.af := entries(mem_ptr).af
  io.out.bits.h_resp := entries(mem_ptr).hptw_resp
  io.out.bits.first_s2xlate_fault := entries(mem_ptr).first_s2xlate_fault

  val hptw_req_arb = Module(new Arbiter(new Bundle {
    val source = UInt(bSourceWidth.W)
    val id = UInt(log2Up(l2tlbParams.llptwsize).W)
    val ppn = UInt(ptePPNLen.W)
  }, 2))
  // first stage 2 translation
  hptw_req_arb.io.in(0).valid := hyper_arb1.io.out.valid
  hptw_req_arb.io.in(0).bits.source := hyper_arb1.io.out.bits.req_info.source
  hptw_req_arb.io.in(0).bits.ppn := hyper_arb1.io.out.bits.ppn
  hptw_req_arb.io.in(0).bits.id := hyper_arb1.io.chosen
  hyper_arb1.io.out.ready := hptw_req_arb.io.in(0).ready
  // last stage 2 translation
  hptw_req_arb.io.in(1).valid := hyper_arb2.io.out.valid
  hptw_req_arb.io.in(1).bits.source := hyper_arb2.io.out.bits.req_info.source
  hptw_req_arb.io.in(1).bits.ppn := hyper_arb2.io.out.bits.ppn
  hptw_req_arb.io.in(1).bits.id := hyper_arb2.io.chosen
  hyper_arb2.io.out.ready := hptw_req_arb.io.in(1).ready
  hptw_req_arb.io.out.ready := io.hptw.req.ready
  io.hptw.req.valid := hptw_req_arb.io.out.fire && !flush
  io.hptw.req.bits.gvpn := hptw_req_arb.io.out.bits.ppn
  io.hptw.req.bits.id := hptw_req_arb.io.out.bits.id
  io.hptw.req.bits.source := hptw_req_arb.io.out.bits.source

  io.mem.req.valid := mem_arb.io.out.valid && !flush
  val mem_paddr = MakeAddr(mem_arb.io.out.bits.ppn, getVpnn(mem_arb.io.out.bits.req_info.vpn, 0))
  val mem_hpaddr = MakeAddr(mem_arb.io.out.bits.hptw_resp.genPPNS2(get_pn(mem_paddr)), getVpnn(mem_arb.io.out.bits.req_info.vpn, 0))
  io.mem.req.bits.addr := Mux(mem_arb.io.out.bits.req_info.s2xlate === allStage, mem_hpaddr, mem_paddr)
  io.mem.req.bits.id := mem_arb.io.chosen
  io.mem.req.bits.hptw_bypassed := false.B
  mem_arb.io.out.ready := io.mem.req.ready
  val mem_refill_id = RegNext(io.mem.resp.bits.id(log2Up(l2tlbParams.llptwsize) - 1, 0))
  io.mem.refill := entries(mem_refill_id).req_info
  io.mem.refill.s2xlate := entries(mem_refill_id).req_info.s2xlate
  io.mem.buffer_it := mem_resp_hit
  io.mem.enq_ptr := enq_ptr

  io.cache.valid := Cat(is_cache).orR
  io.cache.bits := ParallelMux(is_cache, entries.map(_.req_info))

  XSPerfAccumulate("llptw_in_count", io.in.fire)
  XSPerfAccumulate("llptw_in_block", io.in.valid && !io.in.ready)
  for (i <- 0 until 7) {
    XSPerfAccumulate(s"enq_state${i}", io.in.fire && enq_state === i.U)
  }
  for (i <- 0 until (l2tlbParams.llptwsize + 1)) {
    XSPerfAccumulate(s"util${i}", PopCount(is_emptys.map(!_)) === i.U)
    XSPerfAccumulate(s"mem_util${i}", PopCount(is_mems) === i.U)
    XSPerfAccumulate(s"waiting_util${i}", PopCount(is_waiting) === i.U)
  }
  XSPerfAccumulate("mem_count", io.mem.req.fire)
  XSPerfAccumulate("mem_cycle", PopCount(is_waiting) =/= 0.U)
  XSPerfAccumulate("blocked_in", io.in.valid && !io.in.ready)

  val perfEvents = Seq(
    ("tlbllptw_incount  ", io.in.fire                 ),
    ("tlbllptw_inblock  ", io.in.valid && !io.in.ready),
    ("tlbllptw_memcount ", io.mem.req.fire            ),
    ("tlbllptw_memcycle ", PopCount(is_waiting)       ),
  )
  generatePerfEvent()
}
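// Background for HPTW (a summary, not new behavior): under the hypervisor
// extension, a guest virtual address is first translated by the VS-stage
// tables (vsatp) into a guest physical address, and every guest physical
// address, including the addresses of the VS-stage page tables themselves,
// is then translated by the G-stage tables (hgatp). HPTW performs that
// G-stage walk.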
/*========================= HPTW ==============================*/

/** HPTW : Hypervisor Page Table Walker
 *  performs the page walk for the virtual machine:
 *  guest physical address -> host physical address
 **/
class HPTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val id = UInt(log2Up(l2tlbParams.llptwsize).W)
    val gvpn = UInt(gvpnLen.W)
    val ppn = UInt(ppnLen.W)
    val l3Hit = if (EnableSv48) Some(new Bool()) else None
    val l2Hit = Bool()
    val l1Hit = Bool()
    val bypassed = Bool() // if bypassed, don't refill
  }))
  val resp = DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val resp = Output(new HptwResp())
    val id = Output(UInt(bMemID.W))
  })

  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level + 1).W)
  })
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
}

class HPTW()(implicit p: Parameters) extends XSModule with HasPtwConst {
  val io = IO(new HPTWIO)
  val hgatp = io.csr.hgatp
  val mpbmte = io.csr.mPBMTE
  val sfence = io.sfence
  val flush = sfence.valid || hgatp.changed || io.csr.satp.changed || io.csr.vsatp.changed
  val mode = hgatp.mode

  val level = RegInit(3.U(log2Up(Level + 1).W))
  val af_level = RegInit(3.U(log2Up(Level + 1).W)) // access fault returns this level
  val gpaddr = Reg(UInt(GPAddrBits.W))
  val req_ppn = Reg(UInt(ppnLen.W))
  val vpn = gpaddr(GPAddrBits - 1, offLen)
  val levelNext = level - 1.U
  val l3Hit = Reg(Bool())
  val l2Hit = Reg(Bool())
  val l1Hit = Reg(Bool())
  val bypassed = Reg(Bool())
//  val pte = io.mem.resp.bits.MergeRespToPte()
  val pte = io.mem.resp.bits.asTypeOf(new PteBundle().cloneType)
  val ppn_l3 = Mux(l3Hit, req_ppn, pte.ppn)
  val ppn_l2 = Mux(l2Hit, req_ppn, pte.ppn)
  val ppn_l1 = Mux(l1Hit, req_ppn, pte.ppn)
  val ppn = Wire(UInt(PAddrBits.W))
  val p_pte = MakeAddr(ppn, getVpnn(vpn, level))
  val pg_base = Wire(UInt(PAddrBits.W))
  val mem_addr = Wire(UInt(PAddrBits.W))
  if (EnableSv48) {
    when (mode === Sv48) {
      ppn := Mux(af_level === 2.U, ppn_l3, Mux(af_level === 1.U, ppn_l2, ppn_l1)) // for l3, l2 and l1
      pg_base := MakeGPAddr(hgatp.ppn, getGVpnn(vpn, 3.U, mode = Sv48)) // for l3
      mem_addr := Mux(af_level === 3.U, pg_base, p_pte)
    } .otherwise {
      ppn := Mux(af_level === 1.U, ppn_l2, ppn_l1) // for l2 and l1
      pg_base := MakeGPAddr(hgatp.ppn, getGVpnn(vpn, 2.U, mode = Sv39))
      mem_addr := Mux(af_level === 2.U, pg_base, p_pte)
    }
  } else {
    ppn := Mux(af_level === 1.U, ppn_l2, ppn_l1) // for l2 and l1
    pg_base := MakeGPAddr(hgatp.ppn, getGVpnn(vpn, 2.U, mode = Sv39))
    mem_addr := Mux(af_level === 2.U, pg_base, p_pte)
  }

  // s/w registers
  val s_pmp_check = RegInit(true.B)
  val s_mem_req = RegInit(true.B)
  val w_mem_resp = RegInit(true.B)
  val idle = RegInit(true.B)
  val mem_addr_update = RegInit(false.B)
  val finish = WireInit(false.B)

  val sent_to_pmp = !idle && (!s_pmp_check || mem_addr_update) && !finish
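  // A G-stage page fault is raised either by the PTE check itself (isGpf) or
  // when the walk reaches the last level (level 0) without finding a leaf.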
  val pageFault = pte.isGpf(level, mpbmte) || (!pte.isLeaf() && level === 0.U)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, sent_to_pmp)

  val ppn_af = pte.isAf()
  val find_pte = pte.isLeaf() || ppn_af || pageFault

  val resp_valid = !idle && mem_addr_update && ((w_mem_resp && find_pte) || (s_pmp_check && accessFault))
  val id = Reg(UInt(log2Up(l2tlbParams.llptwsize).W))
  val source = RegEnable(io.req.bits.source, io.req.fire)

  io.req.ready := idle
  val resp = Wire(new HptwResp())
  // accessFault > pageFault > ppn_af
  resp.apply(
    gpf = pageFault && !accessFault,
    gaf = accessFault || (ppn_af && !pageFault),
    level = Mux(accessFault, af_level, level),
    pte = pte,
    vpn = vpn,
    vmid = hgatp.vmid
  )
  io.resp.valid := resp_valid
  io.resp.bits.id := id
  io.resp.bits.resp := resp
  io.resp.bits.source := source

  io.pmp.req.valid := DontCare
  io.pmp.req.bits.addr := mem_addr
  io.pmp.req.bits.size := 3.U
  io.pmp.req.bits.cmd := TlbCmd.read

  io.mem.req.valid := !s_mem_req && !io.mem.mask && !accessFault && s_pmp_check
  io.mem.req.bits.addr := mem_addr
  io.mem.req.bits.id := HptwReqId.U(bMemID.W)
  io.mem.req.bits.hptw_bypassed := bypassed

  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source
  io.refill.req_info.s2xlate := onlyStage2
  when (idle) {
    when (io.req.fire) {
      bypassed := io.req.bits.bypassed
      idle := false.B
      gpaddr := Cat(io.req.bits.gvpn, 0.U(offLen.W))
      accessFault := false.B
      s_pmp_check := false.B
      id := io.req.bits.id
      req_ppn := io.req.bits.ppn
      if (EnableSv48) {
        when (mode === Sv48) {
          level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, Mux(io.req.bits.l3Hit.get, 2.U, 3.U)))
          af_level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, Mux(io.req.bits.l3Hit.get, 2.U, 3.U)))
          l3Hit := io.req.bits.l3Hit.get
        } .otherwise {
          level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, 2.U))
          af_level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, 2.U))
          l3Hit := false.B
        }
      } else {
        level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, 2.U))
        af_level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, 2.U))
        l3Hit := false.B
      }
      l2Hit := io.req.bits.l2Hit
      l1Hit := io.req.bits.l1Hit
    }
  }

  when (sent_to_pmp && !mem_addr_update) {
    s_mem_req := false.B
    s_pmp_check := true.B
  }

  when (accessFault && !idle) {
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    mem_addr_update := true.B
  }

  when (io.mem.req.fire) {
    s_mem_req := true.B
    w_mem_resp := false.B
  }

  when (io.mem.resp.fire && !w_mem_resp) {
    w_mem_resp := true.B
    af_level := af_level - 1.U
    mem_addr_update := true.B
  }

  when (mem_addr_update) {
    when (!(find_pte || accessFault)) {
      level := levelNext
      s_mem_req := false.B
      mem_addr_update := false.B
    }.elsewhen (resp_valid) {
      when (io.resp.fire) {
        idle := true.B
        mem_addr_update := false.B
        accessFault := false.B
      }
      finish := true.B
    }
  }
  when (flush) {
    idle := true.B
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    accessFault := false.B
    mem_addr_update := false.B
  }
}