/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import utility._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}

/** The page table walk is split into two parts:
 *  1. PTW:   walks the non-leaf PDEs, one request at a time
 *  2. LLPTW: walks the leaf (4KB) PTEs, with multiple requests in flight in parallel
 */

/** PTW : page table walker
 *  a finite state machine
 *  handles only the 1GB and 2MB (non-leaf) levels of the walk,
 *  i.e. every level except the last (leaf) level
 **/
class PTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val l1Hit = Bool()
    val ppn = UInt(ppnLen.W)
    val stage1Hit = Bool()
    val stage1 = new PtwMergeResp
  }))
  val resp = DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val s2xlate = UInt(2.W)
    val resp = new PtwMergeResp
    val h_resp = new HptwResp
  })

  val llptw = DecoupledIO(new LLPTWInBundle())
  // NOTE: llptw changed from "connect to llptw" to "connect to page cache"
  // to avoid a corner case that caused duplicate entries

  val hptw = new Bundle {
    val req = DecoupledIO(new Bundle {
      val source = UInt(bSourceWidth.W)
      val id = UInt(log2Up(l2tlbParams.llptwsize).W)
      val gvpn = UInt(vpnLen.W)
    })
    val resp = Flipped(Valid(new Bundle {
      val h_resp = Output(new HptwResp)
    }))
  }
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }

  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level).W)
  })
}

class PTW()(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new PTWIO)
  val sfence = io.sfence
  val mem = io.mem
  val req_s2xlate = Reg(UInt(2.W))
  val enableS2xlate = req_s2xlate =/= noS2xlate
  val onlyS1xlate = req_s2xlate === onlyStage1
  val onlyS2xlate = req_s2xlate === onlyStage2

  val satp = Mux(enableS2xlate, io.csr.vsatp, io.csr.satp)
  val hgatp = io.csr.hgatp
  val flush = io.sfence.valid || satp.changed
  val s2xlate = enableS2xlate && !onlyS1xlate
  val level = RegInit(0.U(log2Up(Level).W))
  val af_level = RegInit(0.U(log2Up(Level).W)) // the level reported on an access fault
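  // NOTE (inferred from the address generation below, not stated in the original comments):
  //   level 0 walks with VPN[2] (the 1GB level), level 1 with VPN[1] (the 2MB level);
  //   the 4KB leaf level is handed over to the LLPTW.
  //   af_level advances on every memory response, so an access fault reports the level whose
  //   access actually faulted, even before `level` itself is updated.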
  val ppn = Reg(UInt(ppnLen.W))
  val vpn = Reg(UInt(vpnLen.W)) // vpn or gvpn
  val levelNext = level + 1.U
  val l1Hit = Reg(Bool())
  val pte = mem.resp.bits.asTypeOf(new PteBundle().cloneType)

  // s/w register
  val s_pmp_check = RegInit(true.B)
  val s_mem_req = RegInit(true.B)
  val s_llptw_req = RegInit(true.B)
  val w_mem_resp = RegInit(true.B)
  val s_hptw_req = RegInit(true.B)
  val w_hptw_resp = RegInit(true.B)
  val s_last_hptw_req = RegInit(true.B)
  val w_last_hptw_resp = RegInit(true.B)
  // for updating "level"
  val mem_addr_update = RegInit(false.B)

  val idle = RegInit(true.B)
  val finish = WireInit(false.B)
  val sent_to_pmp = idle === false.B && (s_pmp_check === false.B || mem_addr_update) && !finish

  val pageFault = pte.isPf(level)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, sent_to_pmp)

  val hptw_pageFault = RegInit(false.B)
  val hptw_accessFault = RegInit(false.B)
  val last_s2xlate = RegInit(false.B)
  val stage1Hit = RegEnable(io.req.bits.stage1Hit, io.req.fire())
  val stage1 = RegEnable(io.req.bits.stage1, io.req.fire())
  val hptw_resp_stage2 = Reg(Bool())

  val ppn_af = pte.isAf()
  val find_pte = pte.isLeaf() || ppn_af || pageFault
  val to_find_pte = level === 1.U && find_pte === false.B
  val source = RegEnable(io.req.bits.req_info.source, io.req.fire)

  val l1addr = MakeAddr(satp.ppn, getVpnn(vpn, 2))
  val l2addr = MakeAddr(Mux(l1Hit, ppn, pte.ppn), getVpnn(vpn, 1))
  val mem_addr = Mux(af_level === 0.U, l1addr, l2addr)

  val hptw_resp = RegEnable(io.hptw.resp.bits.h_resp, io.hptw.resp.fire())
  val gpaddr = MuxCase(mem_addr, Seq(
    stage1Hit -> Cat(stage1.genPPN(), 0.U(offLen.W)),
    onlyS2xlate -> Cat(vpn, 0.U(offLen.W)),
    !s_last_hptw_req -> Cat(pte.ppn, 0.U(offLen.W))
  ))
  val hpaddr = Cat(hptw_resp.genPPNS2(get_pn(gpaddr)), get_off(gpaddr))

  io.req.ready := idle
  val ptw_resp = Wire(new PtwMergeResp)
  ptw_resp.apply(pageFault && !accessFault && !ppn_af, accessFault || ppn_af, Mux(accessFault, af_level, level), pte, vpn, satp.asid, hgatp.asid, vpn(sectortlbwidth - 1, 0), not_super = false)

  val normal_resp = idle === false.B && mem_addr_update && !last_s2xlate && ((w_mem_resp && find_pte) || (s_pmp_check && accessFault) || onlyS2xlate)
  val stageHit_resp = idle === false.B && hptw_resp_stage2
  io.resp.valid := Mux(stage1Hit, stageHit_resp, normal_resp)
  io.resp.bits.source := source
  io.resp.bits.resp := Mux(stage1Hit, stage1, ptw_resp)
  io.resp.bits.h_resp := hptw_resp
  io.resp.bits.s2xlate := req_s2xlate

  io.llptw.valid := s_llptw_req === false.B && to_find_pte && !accessFault
  io.llptw.bits.req_info.source := source
  io.llptw.bits.req_info.vpn := vpn
  io.llptw.bits.req_info.s2xlate := req_s2xlate
  io.llptw.bits.ppn := DontCare

  io.pmp.req.valid := DontCare // same cycle, do not use valid
  io.pmp.req.bits.addr := Mux(s2xlate, hpaddr, mem_addr)
  io.pmp.req.bits.size := 3.U // TODO: fix it
  io.pmp.req.bits.cmd := TlbCmd.read

  mem.req.valid := s_mem_req === false.B && !mem.mask && !accessFault && s_pmp_check
  mem.req.bits.addr := Mux(s2xlate, hpaddr, mem_addr)
  mem.req.bits.id := FsmReqID.U(bMemID.W)

  io.refill.req_info.s2xlate := Mux(enableS2xlate, onlyStage1, req_s2xlate) // when s2xlate is enabled, the PTW refills the stage-1 PTE
  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source
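  // NOTE: with two-stage translation enabled, the stage-1 PTE address computed above (mem_addr) is a
  // guest physical address; the HPTW request below translates it into the host physical address
  // (hpaddr) before the PMP check and the memory access are performed.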

  io.hptw.req.valid := !s_hptw_req || !s_last_hptw_req
  io.hptw.req.bits.id := FsmReqID.U(bMemID.W)
  io.hptw.req.bits.gvpn := get_pn(gpaddr)
  io.hptw.req.bits.source := source

  when (io.req.fire() && io.req.bits.stage1Hit){
    idle := false.B
    req_s2xlate := io.req.bits.req_info.s2xlate
    s_hptw_req := false.B
    hptw_resp_stage2 := false.B
  }

  when (io.hptw.resp.fire() && w_hptw_resp === false.B && stage1Hit){
    w_hptw_resp := true.B
    hptw_resp_stage2 := true.B
  }

  when (io.resp.fire() && stage1Hit){
    idle := true.B
  }

  when (io.req.fire() && !io.req.bits.stage1Hit){
    val req = io.req.bits
    level := Mux(req.l1Hit, 1.U, 0.U)
    af_level := Mux(req.l1Hit, 1.U, 0.U)
    ppn := Mux(req.l1Hit, io.req.bits.ppn, satp.ppn)
    vpn := io.req.bits.req_info.vpn
    l1Hit := req.l1Hit
    accessFault := false.B
    idle := false.B
    hptw_pageFault := false.B
    req_s2xlate := io.req.bits.req_info.s2xlate
    when(io.req.bits.req_info.s2xlate =/= noS2xlate && io.req.bits.req_info.s2xlate =/= onlyStage1){
      last_s2xlate := true.B
      s_hptw_req := false.B
    }.otherwise {
      s_pmp_check := false.B
    }
  }

  when(io.hptw.req.fire() && s_hptw_req === false.B){
    s_hptw_req := true.B
    w_hptw_resp := false.B
  }

  when(io.hptw.resp.fire() && w_hptw_resp === false.B && !stage1Hit) {
    hptw_pageFault := io.hptw.resp.bits.h_resp.gpf
    hptw_accessFault := io.hptw.resp.bits.h_resp.gaf
    w_hptw_resp := true.B
    when(onlyS2xlate){
      mem_addr_update := true.B
      last_s2xlate := false.B
    }.otherwise {
      s_pmp_check := false.B
    }
  }

  when(io.hptw.req.fire() && s_last_hptw_req === false.B) {
    w_last_hptw_resp := false.B
    s_last_hptw_req := true.B
  }

  when(io.hptw.resp.fire() && w_last_hptw_resp === false.B){
    hptw_pageFault := io.hptw.resp.bits.h_resp.gpf
    hptw_accessFault := io.hptw.resp.bits.h_resp.gaf
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
    last_s2xlate := false.B
  }

  when(sent_to_pmp && mem_addr_update === false.B){
    s_mem_req := false.B
    s_pmp_check := true.B
  }

  when(accessFault && idle === false.B){
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    s_llptw_req := true.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
    last_s2xlate := false.B
  }

  when (mem.req.fire){
    s_mem_req := true.B
    w_mem_resp := false.B
  }

  when(mem.resp.fire && w_mem_resp === false.B){
    w_mem_resp := true.B
    af_level := af_level + 1.U
    s_llptw_req := false.B
    mem_addr_update := true.B
  }

  when(mem_addr_update){
    when(level === 0.U && !(find_pte || accessFault)){
      level := levelNext
      when(s2xlate){
        s_hptw_req := false.B
      }.otherwise{
        s_mem_req := false.B
      }
      s_llptw_req := true.B
      mem_addr_update := false.B
    }.elsewhen(io.llptw.valid){
      when(io.llptw.fire) {
        idle := true.B
        s_llptw_req := true.B
        mem_addr_update := false.B
        last_s2xlate := false.B
      }
      finish := true.B
    }.elsewhen(s2xlate && last_s2xlate === true.B) {
      s_last_hptw_req := false.B
      mem_addr_update := false.B
    }.elsewhen(io.resp.valid){
      when(io.resp.fire) {
        idle := true.B
        s_llptw_req := true.B
        mem_addr_update := false.B
        accessFault := false.B
      }
      finish := true.B
    }
  }
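  // Summary of the mem_addr_update block above (derived from the conditions it tests):
  //   * level 0 and no leaf/fault found -> descend one level (query the HPTW first under two-stage translation)
  //   * reached the last non-leaf level -> hand the 4KB walk over to the LLPTW
  //   * two-stage translation pending   -> issue the final HPTW request for the leaf PPN
  //   * otherwise                       -> return the response and go back to idle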

  when (sfence.valid) {
    idle := true.B
    s_pmp_check := true.B
    s_mem_req := true.B
    s_llptw_req := true.B
    w_mem_resp := true.B
    accessFault := false.B
    mem_addr_update := false.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
  }

  XSDebug(p"[ptw] level:${level} notFound:${pageFault}\n")

  // perf
  XSPerfAccumulate("fsm_count", io.req.fire)
  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"fsm_count_source${i}", io.req.fire && io.req.bits.req_info.source === i.U)
  }
  XSPerfAccumulate("fsm_busy", !idle)
  XSPerfAccumulate("fsm_idle", idle)
  XSPerfAccumulate("resp_blocked", io.resp.valid && !io.resp.ready)
  XSPerfAccumulate("ptw_ppn_af", io.resp.fire && ppn_af)
  XSPerfAccumulate("mem_count", mem.req.fire)
  XSPerfAccumulate("mem_cycle", BoolStopWatch(mem.req.fire, mem.resp.fire, true))
  XSPerfAccumulate("mem_blocked", mem.req.valid && !mem.req.ready)

  TimeOutAssert(!idle, timeOutThreshold, "page table walker time out")

  val perfEvents = Seq(
    ("fsm_count         ", io.req.fire                                     ),
    ("fsm_busy          ", !idle                                           ),
    ("fsm_idle          ", idle                                            ),
    ("resp_blocked      ", io.resp.valid && !io.resp.ready                 ),
    ("mem_count         ", mem.req.fire                                    ),
    ("mem_cycle         ", BoolStopWatch(mem.req.fire, mem.resp.fire, true)),
    ("mem_blocked       ", mem.req.valid && !mem.req.ready                 ),
  )
  generatePerfEvent()
}

/*========================= LLPTW ==============================*/

/** LLPTW : Last Level Page Table Walker
 *  handles only the last-level (4KB) page walks
 **/

class LLPTWInBundle(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = Output(new L2TlbInnerBundle())
  val ppn = Output(if(HasHExtension) UInt((vpnLen.max(ppnLen)).W) else UInt(ppnLen.W))
}

class LLPTWIO(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val in = Flipped(DecoupledIO(new LLPTWInBundle()))
  val out = DecoupledIO(new Bundle {
    val req_info = Output(new L2TlbInnerBundle())
    val id = Output(UInt(bMemID.W))
    val h_resp = Output(new HptwResp)
    val af = Output(Bool())
  })
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
      val value = Output(UInt(XLEN.W))
    }))
    val enq_ptr = Output(UInt(log2Ceil(l2tlbParams.llptwsize).W))
    val buffer_it = Output(Vec(l2tlbParams.llptwsize, Bool()))
    val refill = Output(new L2TlbInnerBundle())
    val req_mask = Input(Vec(l2tlbParams.llptwsize, Bool()))
  }
  val cache = DecoupledIO(new L2TlbInnerBundle())
  val pmp = new Bundle {
    val req = Valid(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
  val hptw = new Bundle {
    val req = DecoupledIO(new Bundle{
      val source = UInt(bSourceWidth.W)
      val id = UInt(log2Up(l2tlbParams.llptwsize).W)
      val gvpn = UInt(vpnLen.W)
    })
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
      val h_resp = Output(new HptwResp)
    }))
  }
}

class LLPTWEntry(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = new L2TlbInnerBundle()
  val s2xlate = Bool()
  val ppn = UInt(ppnLen.W)
  val wait_id = UInt(log2Up(l2tlbParams.llptwsize).W)
  val af = Bool()
  val hptw_resp = new HptwResp()
}

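/** Per-entry state machine (sketch, reconstructed from the transition logic below):
 *  state_idle -> (state_hptw_req -> state_hptw_resp ->) state_addr_check -> state_mem_req
 *    -> state_mem_waiting -> (state_last_hptw_req -> state_last_hptw_resp ->) state_mem_out -> state_idle
 *  The hptw_* states are only used when two-stage translation is enabled; state_cache is used by
 *  entries whose PTE was just fetched by another entry, so they re-access the page cache instead.
 */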
class LLPTW(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new LLPTWIO())
  val enableS2xlate = io.in.bits.req_info.s2xlate =/= noS2xlate
  val satp = Mux(enableS2xlate, io.csr.vsatp, io.csr.satp)

  val flush = io.sfence.valid || satp.changed
  val entries = Reg(Vec(l2tlbParams.llptwsize, new LLPTWEntry()))
  val state_idle :: state_hptw_req :: state_hptw_resp :: state_addr_check :: state_mem_req :: state_mem_waiting :: state_mem_out :: state_last_hptw_req :: state_last_hptw_resp :: state_cache :: Nil = Enum(10)
  val state = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(state_idle)))

  val is_emptys = state.map(_ === state_idle)
  val is_mems = state.map(_ === state_mem_req)
  val is_waiting = state.map(_ === state_mem_waiting)
  val is_having = state.map(_ === state_mem_out)
  val is_cache = state.map(_ === state_cache)
  val is_hptw_req = state.map(_ === state_hptw_req)
  val is_last_hptw_req = state.map(_ === state_last_hptw_req)

  val full = !ParallelOR(is_emptys).asBool
  val enq_ptr = ParallelPriorityEncoder(is_emptys)

  val mem_ptr = ParallelPriorityEncoder(is_having) // TODO: optimize timing, bad: entries -> ptr -> entry
  val mem_arb = Module(new RRArbiter(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    mem_arb.io.in(i).bits := entries(i)
    mem_arb.io.in(i).valid := is_mems(i) && !io.mem.req_mask(i)
  }
  val hyper_arb1 = Module(new RRArbiter(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    hyper_arb1.io.in(i).bits := entries(i)
    hyper_arb1.io.in(i).valid := is_hptw_req(i)
  }
  val hyper_arb2 = Module(new RRArbiter(new LLPTWEntry(), l2tlbParams.llptwsize))
  for(i <- 0 until l2tlbParams.llptwsize) {
    hyper_arb2.io.in(i).bits := entries(i)
    hyper_arb2.io.in(i).valid := is_last_hptw_req(i)
  }

  val cache_ptr = ParallelMux(is_cache, (0 until l2tlbParams.llptwsize).map(_.U(log2Up(l2tlbParams.llptwsize).W)))

  // duplicate req
  // to_wait:  a duplicate entry is still waiting for memory, so wait on its response (state_mem_waiting)
  // to_cache: the duplicate's response has just come back, so re-access the page cache (state_cache)
  val dup_vec = state.indices.map(i =>
    dup(io.in.bits.req_info.vpn, entries(i).req_info.vpn) && io.in.bits.req_info.s2xlate === entries(i).req_info.s2xlate
  )
  val dup_req_fire = mem_arb.io.out.fire && dup(io.in.bits.req_info.vpn, mem_arb.io.out.bits.req_info.vpn) && io.in.bits.req_info.s2xlate === mem_arb.io.out.bits.req_info.s2xlate // dup with the entry whose mem req fires right now
  val dup_vec_wait = dup_vec.zip(is_waiting).map{case (d, w) => d && w} // dup with "mem_waiting" entries that have already sent a mem req
  val dup_vec_having = dup_vec.zipWithIndex.map{case (d, i) => d && is_having(i)} // dup with a "mem_out" entry that just received its data
  val wait_id = Mux(dup_req_fire, mem_arb.io.chosen, ParallelMux(dup_vec_wait zip entries.map(_.wait_id)))
  val dup_wait_resp = io.mem.resp.fire && VecInit(dup_vec_wait)(io.mem.resp.bits.id) // dup with the entry whose data arrives next cycle
  val to_wait = Cat(dup_vec_wait).orR || dup_req_fire
  val to_mem_out = dup_wait_resp
  val to_cache = Cat(dup_vec_having).orR
  val to_hptw = io.in.bits.req_info.s2xlate =/= noS2xlate
  XSError(RegNext(dup_req_fire && Cat(dup_vec_wait).orR, init = false.B), "mem req but some entries already waiting, should not happen")

  XSError(io.in.fire && ((to_mem_out && to_cache) || (to_wait && to_cache)), "llptw enq, to cache conflict with to mem")
  val mem_resp_hit = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(false.B)))
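  // Choose the initial state for a newly enqueued request: merge with an in-flight duplicate when
  // possible (mem_out / mem_waiting / cache), start a stage-2 HPTW walk when two-stage translation
  // is required, otherwise go straight to the PMP address check.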
  val enq_state_normal = MuxCase(state_addr_check, Seq(
    to_mem_out -> state_mem_out, // same as the to_wait case below, but the mem resp arrives right now
    to_wait -> state_mem_waiting,
    to_cache -> state_cache,
    to_hptw -> state_hptw_req
  ))
  val enq_state = Mux(from_pre(io.in.bits.req_info.source) && enq_state_normal =/= state_addr_check, state_idle, enq_state_normal)
  when (io.in.fire) {
    // if a prefetch req does not need a mem access, just drop it,
    // so there are at most 1 + FilterSize entries that need to re-access the page cache,
    // and 2 + FilterSize entries are enough to avoid deadlock
    state(enq_ptr) := enq_state
    entries(enq_ptr).req_info := io.in.bits.req_info
    entries(enq_ptr).ppn := io.in.bits.ppn
    entries(enq_ptr).wait_id := Mux(to_wait, wait_id, enq_ptr)
    entries(enq_ptr).af := false.B
    entries(enq_ptr).s2xlate := enableS2xlate
    mem_resp_hit(enq_ptr) := to_mem_out
  }

  val enq_ptr_reg = RegNext(enq_ptr)
  val need_addr_check = RegNext(enq_state === state_addr_check && io.in.fire() && !flush)

  val hasHptwResp = ParallelOR(state.map(_ === state_hptw_resp)).asBool
  val hptw_resp_ptr_reg = RegNext(io.hptw.resp.bits.id)
  val hptw_need_addr_check = RegNext(hasHptwResp && io.hptw.resp.fire() && !flush)

  val pte = io.mem.resp.bits.value.asTypeOf(new PteBundle().cloneType)
  val gpaddr = MakeGPAddr(io.in.bits.ppn, getVpnn(io.in.bits.req_info.vpn, 0))
  val hptw_resp = io.hptw.resp.bits.h_resp
  val hpaddr = Cat(hptw_resp.genPPNS2(get_pn(gpaddr)), get_off(gpaddr))
  val hpaddr_reg = RegEnable(hpaddr, hasHptwResp && io.hptw.resp.fire())
  val addr = MakeAddr(io.in.bits.ppn, getVpnn(io.in.bits.req_info.vpn, 0))
  val addr_reg = RegEnable(addr, io.in.fire())
  io.pmp.req.valid := need_addr_check || hptw_need_addr_check
  io.pmp.req.bits.addr := Mux(enableS2xlate, hpaddr, addr)
  io.pmp.req.bits.cmd := TlbCmd.read
  io.pmp.req.bits.size := 3.U // TODO: fix it
  val pmp_resp_valid = io.pmp.req.valid // same cycle
  when (pmp_resp_valid) {
    // NOTE: if the pmp resp arrives but the state is no longer addr_check, the entry duplicates
    //       another entry and its state was already changed; if it duplicates the entry whose req is
    //       firing, it was set to mem_waiting (above), and ld must be false, so don't care
    val ptr = Mux(hptw_need_addr_check, hptw_resp_ptr_reg, enq_ptr_reg)
    val accessFault = io.pmp.resp.ld || io.pmp.resp.mmio
    entries(ptr).af := accessFault
    state(ptr) := Mux(accessFault, state_mem_out, state_mem_req)
  }

  when (mem_arb.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) =/= state_idle && dup(entries(i).req_info.vpn, mem_arb.io.out.bits.req_info.vpn)) {
        // NOTE: "dup enq sets state to mem_wait" -> "sending the req sets other dup entries to mem_wait"
        state(i) := state_mem_waiting
        entries(i).wait_id := mem_arb.io.chosen
      }
    }
  }
  when (io.mem.resp.fire) {
    state.indices.map{i =>
      when (state(i) === state_mem_waiting && io.mem.resp.bits.id === entries(i).wait_id) {
        state(i) := Mux(entries(i).s2xlate, state_last_hptw_req, state_mem_out)
        mem_resp_hit(i) := true.B
        entries(i).ppn := pte.ppn // for the last stage-2 translation
      }
    }
  }

  when (DelayN(io.mem.resp.fire(), 1)) {
    state.indices.map{i =>
      when (state(i) === state_last_hptw_req && io.mem.resp.bits.id === entries(i).wait_id) {
        entries(i).ppn := pte.ppn // for the last stage-2 translation
      }
    }
  }

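  // Two round-robin arbiters issue HPTW requests: hyper_arb1 serves entries that still need the guest
  // physical address of the PTE itself translated (state_hptw_req), while hyper_arb2 serves entries
  // whose leaf PTE has been read and whose final PPN now needs stage-2 translation (state_last_hptw_req).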
  when (hyper_arb1.io.out.fire()) {
    for (i <- state.indices) {
      when (state(i) === state_hptw_req && entries(i).ppn === hyper_arb1.io.out.bits.ppn && entries(i).s2xlate) {
        state(i) := state_hptw_resp
        entries(i).wait_id := hyper_arb1.io.chosen
      }
    }
  }

  when (hyper_arb2.io.out.fire()) {
    for (i <- state.indices) {
      when (state(i) === state_last_hptw_req && entries(i).ppn === hyper_arb2.io.out.bits.ppn && entries(i).s2xlate) {
        state(i) := state_last_hptw_resp
        entries(i).wait_id := hyper_arb2.io.chosen
      }
    }
  }

  when (io.hptw.resp.fire()) {
    for (i <- state.indices) {
      when (state(i) === state_hptw_resp && io.hptw.resp.bits.id === entries(i).wait_id) {
        state(i) := state_addr_check
        entries(i).hptw_resp := io.hptw.resp.bits.h_resp
      }
      when (state(i) === state_last_hptw_resp && io.hptw.resp.bits.id === entries(i).wait_id) {
        state(i) := state_mem_out
        entries(i).hptw_resp := io.hptw.resp.bits.h_resp
      }
    }
  }

  when (io.out.fire) {
    assert(state(mem_ptr) === state_mem_out)
    state(mem_ptr) := state_idle
  }
  mem_resp_hit.map(a => when (a) { a := false.B } )

  when (io.cache.fire) {
    state(cache_ptr) := state_idle
  }
  XSError(io.out.fire && io.cache.fire && (mem_ptr === cache_ptr), "mem resp and cache fire at the same time at same entry")

  when (flush) {
    state.map(_ := state_idle)
  }

  io.in.ready := !full

  io.out.valid := ParallelOR(is_having).asBool
  io.out.bits.req_info := entries(mem_ptr).req_info
  io.out.bits.id := mem_ptr
  io.out.bits.af := entries(mem_ptr).af
  io.out.bits.h_resp := entries(mem_ptr).hptw_resp

  val hptw_req_gvpn_1 = hyper_arb1.io.out.bits.ppn // first stage 2 translation
  val hptw_req_gvpn_2 = hyper_arb2.io.out.bits.ppn // last stage 2 translation
  io.hptw.req.valid := (hyper_arb1.io.out.valid || hyper_arb2.io.out.valid) && !flush
  io.hptw.req.bits.gvpn := Mux(hyper_arb1.io.out.valid, hptw_req_gvpn_1, hptw_req_gvpn_2)
  io.hptw.req.bits.id := Mux(hyper_arb1.io.out.valid, hyper_arb1.io.chosen, hyper_arb2.io.chosen)
  io.hptw.req.bits.source := Mux(hyper_arb1.io.out.valid, hyper_arb1.io.out.bits.req_info.source, hyper_arb2.io.out.bits.req_info.source)
  hyper_arb1.io.out.ready := io.hptw.req.ready
  hyper_arb2.io.out.ready := io.hptw.req.ready

  io.mem.req.valid := mem_arb.io.out.valid && !flush
  val mem_paddr = MakeAddr(mem_arb.io.out.bits.ppn, getVpnn(mem_arb.io.out.bits.req_info.vpn, 0))
  val mem_hpaddr = MakeAddr(mem_arb.io.out.bits.hptw_resp.genPPNS2(get_pn(mem_paddr)), getVpnn(mem_arb.io.out.bits.req_info.vpn, 0))
  io.mem.req.bits.addr := Mux(mem_arb.io.out.bits.s2xlate, mem_hpaddr, mem_paddr)
  io.mem.req.bits.id := mem_arb.io.chosen
  mem_arb.io.out.ready := io.mem.req.ready
  val mem_refill_id = RegNext(io.mem.resp.bits.id(log2Up(l2tlbParams.llptwsize)-1, 0))
  io.mem.refill := entries(mem_refill_id).req_info
  io.mem.refill.s2xlate := Mux(entries(mem_refill_id).req_info.s2xlate === noS2xlate, noS2xlate, onlyStage1) // llptw refills the pte of stage 1
  io.mem.buffer_it := mem_resp_hit
  io.mem.enq_ptr := enq_ptr

  io.cache.valid := Cat(is_cache).orR
  io.cache.bits := ParallelMux(is_cache, entries.map(_.req_info))

  XSPerfAccumulate("llptw_in_count", io.in.fire)
  XSPerfAccumulate("llptw_in_block", io.in.valid && !io.in.ready)
  for (i <- 0 until 7) {
    XSPerfAccumulate(s"enq_state${i}", io.in.fire && enq_state === i.U)
  }
  for (i <- 0 until (l2tlbParams.llptwsize + 1)) {
    XSPerfAccumulate(s"util${i}", PopCount(is_emptys.map(!_)) === i.U)
    XSPerfAccumulate(s"mem_util${i}", PopCount(is_mems) === i.U)
    XSPerfAccumulate(s"waiting_util${i}", PopCount(is_waiting) === i.U)
  }
  XSPerfAccumulate("mem_count", io.mem.req.fire)
  XSPerfAccumulate("mem_cycle", PopCount(is_waiting) =/= 0.U)
  XSPerfAccumulate("blocked_in", io.in.valid && !io.in.ready)

  for (i <- 0 until l2tlbParams.llptwsize) {
    TimeOutAssert(state(i) =/= state_idle, timeOutThreshold, s"missqueue time out no out ${i}")
  }

  val perfEvents = Seq(
    ("tlbllptw_incount    ", io.in.fire                  ),
    ("tlbllptw_inblock    ", io.in.valid && !io.in.ready ),
    ("tlbllptw_memcount   ", io.mem.req.fire             ),
    ("tlbllptw_memcycle   ", PopCount(is_waiting)        ),
  )
  generatePerfEvent()
}

/*========================= HPTW ==============================*/

/** HPTW : Hypervisor Page Table Walker
 *  performs the page walk for the virtual machine's second-stage translation,
 *  i.e. translates a guest physical address into a host physical address
 **/
class HPTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val id = UInt(log2Up(l2tlbParams.llptwsize).W)
    val gvpn = UInt(vpnLen.W)
    val ppn = UInt(ppnLen.W)
    val l1Hit = Bool()
    val l2Hit = Bool()
  }))
  val resp = Valid(new Bundle {
    val source = UInt(bSourceWidth.W)
    val resp = Output(new HptwResp())
    val id = Output(UInt(bMemID.W))
  })

  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level).W)
  })
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
}

class HPTW()(implicit p: Parameters) extends XSModule with HasPtwConst {
  val io = IO(new HPTWIO)
  val hgatp = io.csr.hgatp
  val sfence = io.sfence
  val flush = sfence.valid || hgatp.changed

  val level = RegInit(0.U(log2Up(Level).W))
  val gpaddr = Reg(UInt(GPAddrBits.W))
  val vpn = gpaddr(GPAddrBits-1, offLen)
  val levelNext = level + 1.U
  val l1Hit = Reg(Bool())
  val l2Hit = Reg(Bool())
  val pg_base = MakeGPAddr(hgatp.ppn, getGVpnn(vpn, 2.U)) // for l0
//  val pte = io.mem.resp.bits.MergeRespToPte()
  val pte = io.mem.resp.bits.asTypeOf(new PteBundle().cloneType)
  val ppn_l1 = Mux(l1Hit, io.req.bits.ppn, pte.ppn)
  val ppn_l2 = Mux(l2Hit, io.req.bits.ppn, pte.ppn)
  val ppn = Mux(level === 1.U, ppn_l1, ppn_l2) // for l1 and l2
  val p_pte = MakeAddr(ppn, getVpnn(vpn, 2.U - level))
  val mem_addr = Mux(level === 0.U, pg_base, p_pte)
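  // Address generation mirrors the first-stage walker: the level-0 access uses the G-stage root page
  // table from hgatp (pg_base); later levels use a PPN taken either from the incoming request (on an
  // l1/l2 hit) or from the PTE just read from memory (p_pte).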

  // s/w register
  val s_pmp_check = RegInit(true.B)
  val s_mem_req = RegInit(true.B)
  val w_mem_resp = RegInit(true.B)
  val idle = RegInit(true.B)
  val mem_addr_update = RegInit(false.B)
  val finish = WireInit(false.B)

  val sent_to_pmp = !idle && (!s_pmp_check || mem_addr_update) && !finish
  val pageFault = pte.isPf(level)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, sent_to_pmp)

  val ppn_af = pte.isAf()
  val find_pte = pte.isLeaf() || ppn_af || pageFault

  val resp_valid = !idle && mem_addr_update && ((w_mem_resp && find_pte) || (s_pmp_check && accessFault))
  val id = Reg(UInt(log2Up(l2tlbParams.llptwsize).W))
  val source = RegEnable(io.req.bits.source, io.req.fire())

  io.req.ready := idle
  val resp = Wire(new HptwResp())
  resp.apply(pageFault && !accessFault && !ppn_af, accessFault || ppn_af, level, pte, vpn, hgatp.asid)
  io.resp.valid := resp_valid
  io.resp.bits.id := id
  io.resp.bits.resp := resp
  io.resp.bits.source := source

  io.pmp.req.valid := DontCare
  io.pmp.req.bits.addr := mem_addr
  io.pmp.req.bits.size := 3.U
  io.pmp.req.bits.cmd := TlbCmd.read

  io.mem.req.valid := !s_mem_req && !io.mem.mask && !accessFault && s_pmp_check
  io.mem.req.bits.addr := mem_addr
  io.mem.req.bits.id := HptwReqId.U(bMemID.W)

  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source
  io.refill.req_info.s2xlate := onlyStage2
  when (idle){
    when(io.req.fire()){
      level := Mux(io.req.bits.l2Hit, 2.U, Mux(io.req.bits.l1Hit, 1.U, 0.U))
      idle := false.B
      gpaddr := Cat(io.req.bits.gvpn, 0.U(offLen.W))
      accessFault := false.B
      s_pmp_check := false.B
      id := io.req.bits.id
      l1Hit := io.req.bits.l1Hit
      l2Hit := io.req.bits.l2Hit
    }
  }

  when(sent_to_pmp && !mem_addr_update){
    s_mem_req := false.B
    s_pmp_check := true.B
  }

  when(accessFault && !idle){
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    mem_addr_update := true.B
  }

  when(io.mem.req.fire()){
    s_mem_req := true.B
    w_mem_resp := false.B
  }

  when(io.mem.resp.fire() && !w_mem_resp){
    w_mem_resp := true.B
    mem_addr_update := true.B
  }

  when(mem_addr_update){
    when(!(find_pte || accessFault)){
      level := levelNext
      s_mem_req := false.B
      mem_addr_update := false.B
    }.elsewhen(resp_valid){
      when(io.resp.fire()){
        idle := true.B
        mem_addr_update := false.B
        accessFault := false.B
      }
      finish := true.B
    }
  }
}